[qemu.git] / blame / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c 22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9 34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f 54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6 66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430 72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981 75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
e2eef170 78#endif
9fa3e853 79
bdc44640 80struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
4917cf44 83DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 84/* 0 = Do not count executed instructions.
bf20dc07 85 1 = Precise instruction counting.
2e70f6ef 86 2 = Adaptive rate instruction counting. */
5708fc66 87int use_icount;
6a00d601 88
e2eef170 89#if !defined(CONFIG_USER_ONLY)
4346ae3e 90
1db8abb1 91typedef struct PhysPageEntry PhysPageEntry;
92
93struct PhysPageEntry {
9736e55b 94 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 95 uint32_t skip : 6;
9736e55b 96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 97 uint32_t ptr : 26;
1db8abb1 98};
99
8b795765 100#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
101
03f49957 102/* Size of the L2 (and L3, etc) page tables. */
57271d63 103#define ADDR_SPACE_BITS 64
03f49957 104
026736ce 105#define P_L2_BITS 9
03f49957 106#define P_L2_SIZE (1 << P_L2_BITS)
107
108#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
109
110typedef PhysPageEntry Node[P_L2_SIZE];
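/* Illustrative sketch (added; not part of the original file): how a page
 * index is decomposed into per-level table indices, mirroring the indexing
 * used by phys_page_set_level() and phys_page_find() below.  The helper
 * name is hypothetical and the block is kept compiled out.
 */
#if 0 /* example only */
static inline uint32_t p_l2_index(hwaddr page_index, int level)
{
    /* Each level consumes P_L2_BITS of the page index; level 0 is the
     * leaf table whose entries index into phys_sections. */
    return (page_index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}
#endif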
0475d94f 111
53cb28cb 112typedef struct PhysPageMap {
113 unsigned sections_nb;
114 unsigned sections_nb_alloc;
115 unsigned nodes_nb;
116 unsigned nodes_nb_alloc;
117 Node *nodes;
118 MemoryRegionSection *sections;
119} PhysPageMap;
120
1db8abb1 121struct AddressSpaceDispatch {
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
124 */
125 PhysPageEntry phys_map;
53cb28cb 126 PhysPageMap map;
acc9d80b 127 AddressSpace *as;
1db8abb1 128};
129
90260c6c 130#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131typedef struct subpage_t {
132 MemoryRegion iomem;
acc9d80b 133 AddressSpace *as;
90260c6c 134 hwaddr base;
135 uint16_t sub_section[TARGET_PAGE_SIZE];
136} subpage_t;
137
b41aac4f 138#define PHYS_SECTION_UNASSIGNED 0
139#define PHYS_SECTION_NOTDIRTY 1
140#define PHYS_SECTION_ROM 2
141#define PHYS_SECTION_WATCH 3
5312bd8b 142
e2eef170 143static void io_mem_init(void);
62152b8a 144static void memory_map_init(void);
09daed84 145static void tcg_commit(MemoryListener *listener);
e2eef170 146
1ec9b909 147static MemoryRegion io_mem_watch;
6658ffb8 148#endif
fd6ce8f6 149
6d9a1304 150#if !defined(CONFIG_USER_ONLY)
d6f2ea22 151
53cb28cb 152static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 153{
53cb28cb 154 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
155 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
157 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 158 }
f7bf5461 159}
160
53cb28cb 161static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461 162{
163 unsigned i;
8b795765 164 uint32_t ret;
f7bf5461 165
53cb28cb 166 ret = map->nodes_nb++;
f7bf5461 167 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 168 assert(ret != map->nodes_nb_alloc);
03f49957 169 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb 170 map->nodes[ret][i].skip = 1;
171 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 172 }
f7bf5461 173 return ret;
d6f2ea22 174}
175
53cb28cb 176static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
177 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 178 int level)
f7bf5461 179{
180 PhysPageEntry *p;
181 int i;
03f49957 182 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 183
9736e55b 184 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb 185 lp->ptr = phys_map_node_alloc(map);
186 p = map->nodes[lp->ptr];
f7bf5461 187 if (level == 0) {
03f49957 188 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 189 p[i].skip = 0;
b41aac4f 190 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 191 }
67c4d23c 192 }
f7bf5461 193 } else {
53cb28cb 194 p = map->nodes[lp->ptr];
92e873b9 195 }
03f49957 196 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 197
03f49957 198 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 199 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 200 lp->skip = 0;
c19e8800 201 lp->ptr = leaf;
07f07b31 202 *index += step;
203 *nb -= step;
2999097b 204 } else {
53cb28cb 205 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 206 }
207 ++lp;
f7bf5461 208 }
209}
210
ac1970fb 211static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 212 hwaddr index, hwaddr nb,
2999097b 213 uint16_t leaf)
f7bf5461 214{
2999097b 215 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 216 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 217
53cb28cb 218 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 219}
220
b35ba30f 221/* Compact a non leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
223 */
224static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
225{
226 unsigned valid_ptr = P_L2_SIZE;
227 int valid = 0;
228 PhysPageEntry *p;
229 int i;
230
231 if (lp->ptr == PHYS_MAP_NODE_NIL) {
232 return;
233 }
234
235 p = nodes[lp->ptr];
236 for (i = 0; i < P_L2_SIZE; i++) {
237 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
238 continue;
239 }
240
241 valid_ptr = i;
242 valid++;
243 if (p[i].skip) {
244 phys_page_compact(&p[i], nodes, compacted);
245 }
246 }
247
248 /* We can only compress if there's only one child. */
249 if (valid != 1) {
250 return;
251 }
252
253 assert(valid_ptr < P_L2_SIZE);
254
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
257 return;
258 }
259
260 lp->ptr = p[valid_ptr].ptr;
261 if (!p[valid_ptr].skip) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
266 * change this rule.
267 */
268 lp->skip = 0;
269 } else {
270 lp->skip += p[valid_ptr].skip;
271 }
272}
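/* Worked example (added comment, not in the original): a parent with
 * skip == 1 whose only valid child has skip == 2 ends up pointing straight
 * at the grandchild with skip == 3, so phys_page_find() descends fewer
 * tables.  The (1 << 3) guard above refuses to fold entries whose combined
 * skip would not fit the encoding the lookup path assumes.
 */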
273
274static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275{
276 DECLARE_BITMAP(compacted, nodes_nb);
277
278 if (d->phys_map.skip) {
53cb28cb 279 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f 280 }
281}
282
97115a8d 283static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 284 Node *nodes, MemoryRegionSection *sections)
92e873b9 285{
31ab2b4a 286 PhysPageEntry *p;
97115a8d 287 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 288 int i;
f1f6e3b8 289
9736e55b 290 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 291 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 292 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 293 }
9affd6fc 294 p = nodes[lp.ptr];
03f49957 295 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 296 }
b35ba30f 297
298 if (sections[lp.ptr].size.hi ||
299 range_covers_byte(sections[lp.ptr].offset_within_address_space,
300 sections[lp.ptr].size.lo, addr)) {
301 return &sections[lp.ptr];
302 } else {
303 return &sections[PHYS_SECTION_UNASSIGNED];
304 }
f3705d53 305}
306
e5548617 307bool memory_region_is_unassigned(MemoryRegion *mr)
308{
2a8e7499 309 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 310 && mr != &io_mem_watch;
fd6ce8f6 311}
149f54b5 312
c7086b4a 313static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c 314 hwaddr addr,
315 bool resolve_subpage)
9f029603 316{
90260c6c 317 MemoryRegionSection *section;
318 subpage_t *subpage;
319
53cb28cb 320 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c 321 if (resolve_subpage && section->mr->subpage) {
322 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 323 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c 324 }
325 return section;
9f029603 326}
327
90260c6c 328static MemoryRegionSection *
c7086b4a 329address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 330 hwaddr *plen, bool resolve_subpage)
149f54b5 331{
332 MemoryRegionSection *section;
a87f3954 333 Int128 diff;
149f54b5 334
c7086b4a 335 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5 336 /* Compute offset within MemoryRegionSection */
337 addr -= section->offset_within_address_space;
338
339 /* Compute offset within MemoryRegion */
340 *xlat = addr + section->offset_within_region;
341
342 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 343 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5 344 return section;
345}
90260c6c 346
a87f3954 347static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348{
349 if (memory_region_is_ram(mr)) {
350 return !(is_write && mr->readonly);
351 }
352 if (memory_region_is_romd(mr)) {
353 return !is_write;
354 }
355
356 return false;
357}
358
5c8a00ce 359MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
360 hwaddr *xlat, hwaddr *plen,
361 bool is_write)
90260c6c 362{
30951157 363 IOMMUTLBEntry iotlb;
364 MemoryRegionSection *section;
365 MemoryRegion *mr;
366 hwaddr len = *plen;
367
368 for (;;) {
a87f3954 369 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157 370 mr = section->mr;
371
372 if (!mr->iommu_ops) {
373 break;
374 }
375
8d7b8cb9 376 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157 377 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
378 | (addr & iotlb.addr_mask));
379 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
380 if (!(iotlb.perm & (1 << is_write))) {
381 mr = &io_mem_unassigned;
382 break;
383 }
384
385 as = iotlb.target_as;
386 }
387
fe680d0d 388 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 389 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
390 len = MIN(page, len);
391 }
392
30951157 393 *plen = len;
394 *xlat = addr;
395 return mr;
90260c6c 396}
397
398MemoryRegionSection *
399address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
400 hwaddr *plen)
401{
30951157 402 MemoryRegionSection *section;
c7086b4a 403 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157 404
405 assert(!section->mr->iommu_ops);
406 return section;
90260c6c 407}
5b6dd868 408#endif
fd6ce8f6 409
5b6dd868 410void cpu_exec_init_all(void)
fdbb84d1 411{
5b6dd868 412#if !defined(CONFIG_USER_ONLY)
b2a8658e 413 qemu_mutex_init(&ram_list.mutex);
5b6dd868 414 memory_map_init();
415 io_mem_init();
fdbb84d1 416#endif
5b6dd868 417}
fdbb84d1 418
b170fce3 419#if !defined(CONFIG_USER_ONLY)
5b6dd868 420
421static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 422{
259186a7 423 CPUState *cpu = opaque;
a513fe19 424
5b6dd868 425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
259186a7 427 cpu->interrupt_request &= ~0x01;
c01a71c1 428 tlb_flush(cpu, 1);
5b6dd868 429
430 return 0;
a513fe19 431}
7501267e 432
6c3bff0e 433static int cpu_common_pre_load(void *opaque)
434{
435 CPUState *cpu = opaque;
436
437 cpu->exception_index = 0;
438
439 return 0;
440}
441
442static bool cpu_common_exception_index_needed(void *opaque)
443{
444 CPUState *cpu = opaque;
445
446 return cpu->exception_index != 0;
447}
448
449static const VMStateDescription vmstate_cpu_common_exception_index = {
450 .name = "cpu_common/exception_index",
451 .version_id = 1,
452 .minimum_version_id = 1,
453 .fields = (VMStateField[]) {
454 VMSTATE_INT32(exception_index, CPUState),
455 VMSTATE_END_OF_LIST()
456 }
457};
458
1a1562f5 459const VMStateDescription vmstate_cpu_common = {
5b6dd868 460 .name = "cpu_common",
461 .version_id = 1,
462 .minimum_version_id = 1,
6c3bff0e 463 .pre_load = cpu_common_pre_load,
5b6dd868 464 .post_load = cpu_common_post_load,
35d08458 465 .fields = (VMStateField[]) {
259186a7 466 VMSTATE_UINT32(halted, CPUState),
467 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 468 VMSTATE_END_OF_LIST()
6c3bff0e 469 },
470 .subsections = (VMStateSubsection[]) {
471 {
472 .vmsd = &vmstate_cpu_common_exception_index,
473 .needed = cpu_common_exception_index_needed,
474 } , {
475 /* empty */
476 }
5b6dd868 477 }
478};
1a1562f5 479
5b6dd868 480#endif
ea041c0e 481
38d8f5c8 482CPUState *qemu_get_cpu(int index)
ea041c0e 483{
bdc44640 484 CPUState *cpu;
ea041c0e 485
bdc44640 486 CPU_FOREACH(cpu) {
55e5c285 487 if (cpu->cpu_index == index) {
bdc44640 488 return cpu;
55e5c285 489 }
ea041c0e 490 }
5b6dd868 491
bdc44640 492 return NULL;
ea041c0e 493}
494
09daed84 495#if !defined(CONFIG_USER_ONLY)
496void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
497{
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu->as == as);
500
501 if (cpu->tcg_as_listener) {
502 memory_listener_unregister(cpu->tcg_as_listener);
503 } else {
504 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
505 }
506 cpu->tcg_as_listener->commit = tcg_commit;
507 memory_listener_register(cpu->tcg_as_listener, as);
508}
509#endif
510
5b6dd868 511void cpu_exec_init(CPUArchState *env)
ea041c0e 512{
5b6dd868 513 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 514 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 515 CPUState *some_cpu;
5b6dd868 516 int cpu_index;
517
518#if defined(CONFIG_USER_ONLY)
519 cpu_list_lock();
520#endif
5b6dd868 521 cpu_index = 0;
bdc44640 522 CPU_FOREACH(some_cpu) {
5b6dd868 523 cpu_index++;
524 }
55e5c285 525 cpu->cpu_index = cpu_index;
1b1ed8dc 526 cpu->numa_node = 0;
f0c3c505 527 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 528 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 529#ifndef CONFIG_USER_ONLY
09daed84 530 cpu->as = &address_space_memory;
5b6dd868 531 cpu->thread_id = qemu_get_thread_id();
532#endif
bdc44640 533 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868 534#if defined(CONFIG_USER_ONLY)
535 cpu_list_unlock();
536#endif
e0d47944 537 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
538 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
539 }
5b6dd868 540#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868 541 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
542 cpu_save, cpu_load, env);
b170fce3 543 assert(cc->vmsd == NULL);
e0d47944 544 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 545#endif
b170fce3 546 if (cc->vmsd != NULL) {
547 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
548 }
ea041c0e 549}
550
1fddef4b 551#if defined(TARGET_HAS_ICE)
94df27fd 552#if defined(CONFIG_USER_ONLY)
00b941e5 553static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd 554{
555 tb_invalidate_phys_page_range(pc, pc + 1, 0);
556}
557#else
00b941e5 558static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 559{
e8262a1b 560 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
561 if (phys != -1) {
09daed84 562 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 563 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 564 }
1e7855a5 565}
c27004ec 566#endif
94df27fd 567#endif /* TARGET_HAS_ICE */
d720b93d 568
c527ee8f 569#if defined(CONFIG_USER_ONLY)
75a34036 570void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f 571
572{
573}
574
75a34036 575int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f 576 int flags, CPUWatchpoint **watchpoint)
577{
578 return -ENOSYS;
579}
580#else
6658ffb8 581/* Add a watchpoint. */
75a34036 582int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 583 int flags, CPUWatchpoint **watchpoint)
6658ffb8 584{
75a34036 585 vaddr len_mask = ~(len - 1);
c0ce998e 586 CPUWatchpoint *wp;
6658ffb8 587
b4051334 588 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828 589 if ((len & (len - 1)) || (addr & ~len_mask) ||
590 len == 0 || len > TARGET_PAGE_SIZE) {
75a34036 591 error_report("tried to set invalid watchpoint at %"
592 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334 593 return -EINVAL;
594 }
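/* Example (added comment): for len == 4, len_mask == ~3, so the check
 * above requires addr to be 4-byte aligned ((addr & ~len_mask) == 0);
 * len == 3 is rejected because (len & (len - 1)) != 0. */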
7267c094 595 wp = g_malloc(sizeof(*wp));
a1d1bb31 596
597 wp->vaddr = addr;
b4051334 598 wp->len_mask = len_mask;
a1d1bb31 599 wp->flags = flags;
600
2dc9f411 601 /* keep all GDB-injected watchpoints in front */
ff4700b0 602 if (flags & BP_GDB) {
603 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
604 } else {
605 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
606 }
6658ffb8 607
31b030d4 608 tlb_flush_page(cpu, addr);
a1d1bb31 609
610 if (watchpoint)
611 *watchpoint = wp;
612 return 0;
6658ffb8 613}
614
a1d1bb31 615/* Remove a specific watchpoint. */
75a34036 616int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 617 int flags)
6658ffb8 618{
75a34036 619 vaddr len_mask = ~(len - 1);
a1d1bb31 620 CPUWatchpoint *wp;
6658ffb8 621
ff4700b0 622 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334 623 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 624 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 625 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8 626 return 0;
627 }
628 }
a1d1bb31 629 return -ENOENT;
6658ffb8 630}
631
a1d1bb31 632/* Remove a specific watchpoint by reference. */
75a34036 633void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 634{
ff4700b0 635 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 636
31b030d4 637 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 638
7267c094 639 g_free(watchpoint);
a1d1bb31
AL
640}
641
642/* Remove all matching watchpoints. */
75a34036 643void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 644{
c0ce998e 645 CPUWatchpoint *wp, *next;
a1d1bb31 646
ff4700b0 647 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
648 if (wp->flags & mask) {
649 cpu_watchpoint_remove_by_ref(cpu, wp);
650 }
c0ce998e 651 }
7d03f82f 652}
c527ee8f 653#endif
7d03f82f 654
a1d1bb31 655/* Add a breakpoint. */
b3310ab3 656int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 657 CPUBreakpoint **breakpoint)
4c3a88a2 658{
1fddef4b 659#if defined(TARGET_HAS_ICE)
c0ce998e 660 CPUBreakpoint *bp;
3b46e624 661
7267c094 662 bp = g_malloc(sizeof(*bp));
4c3a88a2 663
a1d1bb31
AL
664 bp->pc = pc;
665 bp->flags = flags;
666
2dc9f411 667 /* keep all GDB-injected breakpoints in front */
00b941e5 668 if (flags & BP_GDB) {
f0c3c505 669 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 670 } else {
f0c3c505 671 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 672 }
3b46e624 673
f0c3c505 674 breakpoint_invalidate(cpu, pc);
a1d1bb31 675
00b941e5 676 if (breakpoint) {
a1d1bb31 677 *breakpoint = bp;
00b941e5 678 }
4c3a88a2
FB
679 return 0;
680#else
a1d1bb31 681 return -ENOSYS;
4c3a88a2
FB
682#endif
683}
684
a1d1bb31 685/* Remove a specific breakpoint. */
b3310ab3 686int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 687{
7d03f82f 688#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
689 CPUBreakpoint *bp;
690
f0c3c505 691 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 692 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 693 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
694 return 0;
695 }
7d03f82f 696 }
a1d1bb31
AL
697 return -ENOENT;
698#else
699 return -ENOSYS;
7d03f82f
EI
700#endif
701}
702
a1d1bb31 703/* Remove a specific breakpoint by reference. */
b3310ab3 704void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 705{
1fddef4b 706#if defined(TARGET_HAS_ICE)
f0c3c505
AF
707 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
708
709 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 710
7267c094 711 g_free(breakpoint);
a1d1bb31
AL
712#endif
713}
714
715/* Remove all matching breakpoints. */
b3310ab3 716void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
717{
718#if defined(TARGET_HAS_ICE)
c0ce998e 719 CPUBreakpoint *bp, *next;
a1d1bb31 720
f0c3c505 721 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
722 if (bp->flags & mask) {
723 cpu_breakpoint_remove_by_ref(cpu, bp);
724 }
c0ce998e 725 }
4c3a88a2
FB
726#endif
727}
728
c33a346e
FB
729/* enable or disable single step mode. EXCP_DEBUG is returned by the
730 CPU loop after each instruction */
3825b28f 731void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 732{
1fddef4b 733#if defined(TARGET_HAS_ICE)
ed2803da
AF
734 if (cpu->singlestep_enabled != enabled) {
735 cpu->singlestep_enabled = enabled;
736 if (kvm_enabled()) {
38e478ec 737 kvm_update_guest_debug(cpu, 0);
ed2803da 738 } else {
ccbb4d44 739 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 740 /* XXX: only flush what is necessary */
38e478ec 741 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
742 tb_flush(env);
743 }
c33a346e
FB
744 }
745#endif
746}
747
a47dddd7 748void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
749{
750 va_list ap;
493ae1f0 751 va_list ap2;
7501267e
FB
752
753 va_start(ap, fmt);
493ae1f0 754 va_copy(ap2, ap);
7501267e
FB
755 fprintf(stderr, "qemu: fatal: ");
756 vfprintf(stderr, fmt, ap);
757 fprintf(stderr, "\n");
878096ee 758 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
759 if (qemu_log_enabled()) {
760 qemu_log("qemu: fatal: ");
761 qemu_log_vprintf(fmt, ap2);
762 qemu_log("\n");
a0762859 763 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 764 qemu_log_flush();
93fcfe39 765 qemu_log_close();
924edcae 766 }
493ae1f0 767 va_end(ap2);
f9373291 768 va_end(ap);
fd052bf6
RV
769#if defined(CONFIG_USER_ONLY)
770 {
771 struct sigaction act;
772 sigfillset(&act.sa_mask);
773 act.sa_handler = SIG_DFL;
774 sigaction(SIGABRT, &act, NULL);
775 }
776#endif
7501267e
FB
777 abort();
778}
779
0124311e 780#if !defined(CONFIG_USER_ONLY)
041603fe
PB
781static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
782{
783 RAMBlock *block;
784
785 /* The list is protected by the iothread lock here. */
786 block = ram_list.mru_block;
787 if (block && addr - block->offset < block->length) {
788 goto found;
789 }
790 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
791 if (addr - block->offset < block->length) {
792 goto found;
793 }
794 }
795
796 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
797 abort();
798
799found:
800 ram_list.mru_block = block;
801 return block;
802}
803
a2f4d5be 804static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 805{
041603fe 806 ram_addr_t start1;
a2f4d5be
JQ
807 RAMBlock *block;
808 ram_addr_t end;
809
810 end = TARGET_PAGE_ALIGN(start + length);
811 start &= TARGET_PAGE_MASK;
d24981d3 812
041603fe
PB
813 block = qemu_get_ram_block(start);
814 assert(block == qemu_get_ram_block(end - 1));
815 start1 = (uintptr_t)block->host + (start - block->offset);
816 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
817}
818
5579c7f3 819/* Note: start and end must be within the same ram block. */
a2f4d5be 820void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 821 unsigned client)
1ccde1cb 822{
1ccde1cb
FB
823 if (length == 0)
824 return;
ace694cc 825 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 826
d24981d3 827 if (tcg_enabled()) {
a2f4d5be 828 tlb_reset_dirty_range_all(start, length);
5579c7f3 829 }
1ccde1cb
FB
830}
831
981fdf23 832static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
833{
834 in_migration = enable;
74576198
AL
835}
836
bb0e627a 837hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
838 MemoryRegionSection *section,
839 target_ulong vaddr,
840 hwaddr paddr, hwaddr xlat,
841 int prot,
842 target_ulong *address)
e5548617 843{
a8170e5e 844 hwaddr iotlb;
e5548617
BS
845 CPUWatchpoint *wp;
846
cc5bea60 847 if (memory_region_is_ram(section->mr)) {
e5548617
BS
848 /* Normal RAM. */
849 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 850 + xlat;
e5548617 851 if (!section->readonly) {
b41aac4f 852 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 853 } else {
b41aac4f 854 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
855 }
856 } else {
1b3fb98f 857 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 858 iotlb += xlat;
e5548617
BS
859 }
860
861 /* Make accesses to pages with watchpoints go via the
862 watchpoint trap routines. */
ff4700b0 863 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
e5548617
BS
864 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
865 /* Avoid trapping reads of pages with a write breakpoint. */
866 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 867 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
868 *address |= TLB_MMIO;
869 break;
870 }
871 }
872 }
873
874 return iotlb;
875}
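/* Sketch of the encoding built above (added comment, not in the original):
 * for RAM the returned value is essentially a ram_addr with a small
 * PHYS_SECTION_* index ORed into its low bits; for MMIO it is a section
 * index (always < TARGET_PAGE_SIZE, see phys_section_add()) combined with
 * the translated offset.  iotlb_to_region() later extracts the section
 * with (index & ~TARGET_PAGE_MASK).
 */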
9fa3e853 876#endif /* defined(CONFIG_USER_ONLY) */
877
e2eef170 878#if !defined(CONFIG_USER_ONLY)
8da3ff18 879
c227f099 880static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 881 uint16_t section);
acc9d80b 882static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 883
575ddeb4 884static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
885
886/*
887 * Set a custom physical guest memory allocator.
888 * Accelerators with unusual needs may need this. Hopefully, we can
889 * get rid of it eventually.
890 */
575ddeb4 891void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
892{
893 phys_mem_alloc = alloc;
894}
895
53cb28cb
MA
896static uint16_t phys_section_add(PhysPageMap *map,
897 MemoryRegionSection *section)
5312bd8b 898{
68f3f65b
PB
899 /* The physical section number is ORed with a page-aligned
900 * pointer to produce the iotlb entries. Thus it should
901 * never overflow into the page-aligned value.
902 */
53cb28cb 903 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 904
53cb28cb
MA
905 if (map->sections_nb == map->sections_nb_alloc) {
906 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
907 map->sections = g_renew(MemoryRegionSection, map->sections,
908 map->sections_nb_alloc);
5312bd8b 909 }
53cb28cb 910 map->sections[map->sections_nb] = *section;
dfde4e6e 911 memory_region_ref(section->mr);
53cb28cb 912 return map->sections_nb++;
5312bd8b
AK
913}
914
058bc4b5
PB
915static void phys_section_destroy(MemoryRegion *mr)
916{
dfde4e6e
PB
917 memory_region_unref(mr);
918
058bc4b5
PB
919 if (mr->subpage) {
920 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 921 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
922 g_free(subpage);
923 }
924}
925
6092666e 926static void phys_sections_free(PhysPageMap *map)
5312bd8b 927{
9affd6fc
PB
928 while (map->sections_nb > 0) {
929 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
930 phys_section_destroy(section->mr);
931 }
9affd6fc
PB
932 g_free(map->sections);
933 g_free(map->nodes);
5312bd8b
AK
934}
935
ac1970fb 936static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
937{
938 subpage_t *subpage;
a8170e5e 939 hwaddr base = section->offset_within_address_space
0f0cb164 940 & TARGET_PAGE_MASK;
97115a8d 941 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 942 d->map.nodes, d->map.sections);
0f0cb164
AK
943 MemoryRegionSection subsection = {
944 .offset_within_address_space = base,
052e87b0 945 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 946 };
a8170e5e 947 hwaddr start, end;
0f0cb164 948
f3705d53 949 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 950
f3705d53 951 if (!(existing->mr->subpage)) {
acc9d80b 952 subpage = subpage_init(d->as, base);
3be91e86 953 subsection.address_space = d->as;
0f0cb164 954 subsection.mr = &subpage->iomem;
ac1970fb 955 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 956 phys_section_add(&d->map, &subsection));
0f0cb164 957 } else {
f3705d53 958 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
959 }
960 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 961 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
962 subpage_register(subpage, start, end,
963 phys_section_add(&d->map, section));
0f0cb164
AK
964}
965
966
052e87b0
PB
967static void register_multipage(AddressSpaceDispatch *d,
968 MemoryRegionSection *section)
33417e70 969{
a8170e5e 970 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 971 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
972 uint64_t num_pages = int128_get64(int128_rshift(section->size,
973 TARGET_PAGE_BITS));
dd81124b 974
733d5ef5
PB
975 assert(num_pages);
976 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
977}
978
ac1970fb 979static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 980{
89ae337a 981 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 982 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 983 MemoryRegionSection now = *section, remain = *section;
052e87b0 984 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 985
733d5ef5
PB
986 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
987 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
988 - now.offset_within_address_space;
989
052e87b0 990 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 991 register_subpage(d, &now);
733d5ef5 992 } else {
052e87b0 993 now.size = int128_zero();
733d5ef5 994 }
052e87b0
PB
995 while (int128_ne(remain.size, now.size)) {
996 remain.size = int128_sub(remain.size, now.size);
997 remain.offset_within_address_space += int128_get64(now.size);
998 remain.offset_within_region += int128_get64(now.size);
69b67646 999 now = remain;
052e87b0 1000 if (int128_lt(remain.size, page_size)) {
733d5ef5 1001 register_subpage(d, &now);
88266249 1002 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1003 now.size = page_size;
ac1970fb 1004 register_subpage(d, &now);
69b67646 1005 } else {
052e87b0 1006 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1007 register_multipage(d, &now);
69b67646 1008 }
0f0cb164 1009 }
1010}
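/* Worked example (added comment, not in the original): a section that
 * starts half-way into a page and ends half-way into a later page is split
 * by the loop above into a leading subpage, a run of whole pages handled by
 * register_multipage(), and a trailing subpage for the remainder.
 */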
1011
62a2744c
SY
1012void qemu_flush_coalesced_mmio_buffer(void)
1013{
1014 if (kvm_enabled())
1015 kvm_flush_coalesced_mmio_buffer();
1016}
1017
b2a8658e
UD
1018void qemu_mutex_lock_ramlist(void)
1019{
1020 qemu_mutex_lock(&ram_list.mutex);
1021}
1022
1023void qemu_mutex_unlock_ramlist(void)
1024{
1025 qemu_mutex_unlock(&ram_list.mutex);
1026}
1027
e1e84ba0 1028#ifdef __linux__
c902760f
MT
1029
1030#include <sys/vfs.h>
1031
1032#define HUGETLBFS_MAGIC 0x958458f6
1033
fc7a5800 1034static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1035{
1036 struct statfs fs;
1037 int ret;
1038
1039 do {
9742bf26 1040 ret = statfs(path, &fs);
c902760f
MT
1041 } while (ret != 0 && errno == EINTR);
1042
1043 if (ret != 0) {
fc7a5800
HT
1044 error_setg_errno(errp, errno, "failed to get page size of file %s",
1045 path);
9742bf26 1046 return 0;
c902760f
MT
1047 }
1048
1049 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1050 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1051
1052 return fs.f_bsize;
1053}
1054
04b16653
AW
1055static void *file_ram_alloc(RAMBlock *block,
1056 ram_addr_t memory,
7f56e740
PB
1057 const char *path,
1058 Error **errp)
c902760f
MT
1059{
1060 char *filename;
8ca761f6
PF
1061 char *sanitized_name;
1062 char *c;
557529dd 1063 void *area = NULL;
c902760f 1064 int fd;
557529dd 1065 uint64_t hpagesize;
fc7a5800 1066 Error *local_err = NULL;
c902760f 1067
fc7a5800
HT
1068 hpagesize = gethugepagesize(path, &local_err);
1069 if (local_err) {
1070 error_propagate(errp, local_err);
f9a49dfa 1071 goto error;
c902760f
MT
1072 }
1073
1074 if (memory < hpagesize) {
557529dd
HT
1075 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1076 "or larger than huge page size 0x%" PRIx64,
1077 memory, hpagesize);
1078 goto error;
c902760f
MT
1079 }
1080
1081 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1082 error_setg(errp,
1083 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1084 goto error;
c902760f
MT
1085 }
1086
8ca761f6 1087 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1088 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1089 for (c = sanitized_name; *c != '\0'; c++) {
1090 if (*c == '/')
1091 *c = '_';
1092 }
1093
1094 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1095 sanitized_name);
1096 g_free(sanitized_name);
c902760f
MT
1097
1098 fd = mkstemp(filename);
1099 if (fd < 0) {
7f56e740
PB
1100 error_setg_errno(errp, errno,
1101 "unable to create backing store for hugepages");
e4ada482 1102 g_free(filename);
f9a49dfa 1103 goto error;
c902760f
MT
1104 }
1105 unlink(filename);
e4ada482 1106 g_free(filename);
c902760f 1107
1108 memory = (memory+hpagesize-1) & ~(hpagesize-1);
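/* Example (added comment): with 2 MiB huge pages, a 3 MiB request is
 * rounded up here to 4 MiB so that the mmap below covers whole huge
 * pages. */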
1109
1110 /*
1111 * ftruncate is not supported by hugetlbfs in older
1112 * hosts, so don't bother bailing out on errors.
1113 * If anything goes wrong with it under other filesystems,
1114 * mmap will fail.
1115 */
7f56e740 1116 if (ftruncate(fd, memory)) {
9742bf26 1117 perror("ftruncate");
7f56e740 1118 }
c902760f 1119
dbcb8981
PB
1120 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1121 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1122 fd, 0);
c902760f 1123 if (area == MAP_FAILED) {
7f56e740
PB
1124 error_setg_errno(errp, errno,
1125 "unable to map backing store for hugepages");
9742bf26 1126 close(fd);
f9a49dfa 1127 goto error;
c902760f 1128 }
ef36fa14
MT
1129
1130 if (mem_prealloc) {
38183310 1131 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1132 }
1133
04b16653 1134 block->fd = fd;
c902760f 1135 return area;
f9a49dfa
MT
1136
1137error:
1138 if (mem_prealloc) {
1139 exit(1);
1140 }
1141 return NULL;
c902760f
MT
1142}
1143#endif
1144
d17b5288 1145static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1146{
1147 RAMBlock *block, *next_block;
3e837b2c 1148 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1149
49cd9ac6
SH
1150 assert(size != 0); /* it would hand out same offset multiple times */
1151
a3161038 1152 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1153 return 0;
1154
a3161038 1155 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1156 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1157
1158 end = block->offset + block->length;
1159
a3161038 1160 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1161 if (next_block->offset >= end) {
1162 next = MIN(next, next_block->offset);
1163 }
1164 }
1165 if (next - end >= size && next - end < mingap) {
3e837b2c 1166 offset = end;
04b16653
AW
1167 mingap = next - end;
1168 }
1169 }
3e837b2c
AW
1170
1171 if (offset == RAM_ADDR_MAX) {
1172 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1173 (uint64_t)size);
1174 abort();
1175 }
1176
04b16653 1177 return offset;
1178}
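/* Example (added comment, not in the original): with blocks occupying
 * [0, 0x1000) and [0x3000, 0x4000), a request for 0x1000 bytes is placed
 * at offset 0x1000, because the 0x1000-0x3000 hole is the smallest gap
 * that still fits (the mingap tracking above).
 */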
1179
652d7ec2 1180ram_addr_t last_ram_offset(void)
d17b5288 1182 RAMBlock *block;
1182 RAMBlock *block;
1183 ram_addr_t last = 0;
1184
a3161038 1185 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1186 last = MAX(last, block->offset + block->length);
1187
1188 return last;
1189}
1190
ddb97f1d
JB
1191static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1192{
1193 int ret;
ddb97f1d
JB
1194
1195 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1196 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1197 "dump-guest-core", true)) {
ddb97f1d
JB
1198 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1199 if (ret) {
1200 perror("qemu_madvise");
1201 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1202 "but dump_guest_core=off specified\n");
1203 }
1204 }
1205}
1206
20cfe881 1207static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1208{
20cfe881 1209 RAMBlock *block;
84b89d78 1210
a3161038 1211 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1212 if (block->offset == addr) {
20cfe881 1213 return block;
c5705a77
AK
1214 }
1215 }
20cfe881
HT
1216
1217 return NULL;
1218}
1219
1220void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1221{
1222 RAMBlock *new_block = find_ram_block(addr);
1223 RAMBlock *block;
1224
c5705a77
AK
1225 assert(new_block);
1226 assert(!new_block->idstr[0]);
84b89d78 1227
09e5ab63
AL
1228 if (dev) {
1229 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1230 if (id) {
1231 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1232 g_free(id);
84b89d78
CM
1233 }
1234 }
1235 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1236
b2a8658e
UD
1237 /* This assumes the iothread lock is taken here too. */
1238 qemu_mutex_lock_ramlist();
a3161038 1239 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1240 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1241 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1242 new_block->idstr);
1243 abort();
1244 }
1245 }
b2a8658e 1246 qemu_mutex_unlock_ramlist();
c5705a77
AK
1247}
1248
20cfe881
HT
1249void qemu_ram_unset_idstr(ram_addr_t addr)
1250{
1251 RAMBlock *block = find_ram_block(addr);
1252
1253 if (block) {
1254 memset(block->idstr, 0, sizeof(block->idstr));
1255 }
1256}
1257
8490fc78
LC
1258static int memory_try_enable_merging(void *addr, size_t len)
1259{
2ff3de68 1260 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1261 /* disabled by the user */
1262 return 0;
1263 }
1264
1265 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1266}
1267
ef701d7b 1268static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1269{
e1c57ab8 1270 RAMBlock *block;
2152f5ca
JQ
1271 ram_addr_t old_ram_size, new_ram_size;
1272
1273 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1274
b2a8658e
UD
1275 /* This assumes the iothread lock is taken here too. */
1276 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1277 new_block->offset = find_ram_offset(new_block->length);
1278
1279 if (!new_block->host) {
1280 if (xen_enabled()) {
1281 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1282 } else {
1283 new_block->host = phys_mem_alloc(new_block->length);
39228250 1284 if (!new_block->host) {
ef701d7b
HT
1285 error_setg_errno(errp, errno,
1286 "cannot set up guest memory '%s'",
1287 memory_region_name(new_block->mr));
1288 qemu_mutex_unlock_ramlist();
1289 return -1;
39228250 1290 }
e1c57ab8 1291 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1292 }
c902760f 1293 }
94a6b54f 1294
abb26d63
PB
1295 /* Keep the list sorted from biggest to smallest block. */
1296 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1297 if (block->length < new_block->length) {
1298 break;
1299 }
1300 }
1301 if (block) {
1302 QTAILQ_INSERT_BEFORE(block, new_block, next);
1303 } else {
1304 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1305 }
0d6d3c87 1306 ram_list.mru_block = NULL;
94a6b54f 1307
f798b07f 1308 ram_list.version++;
b2a8658e 1309 qemu_mutex_unlock_ramlist();
f798b07f 1310
2152f5ca
JQ
1311 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1312
1313 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1314 int i;
1315 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1316 ram_list.dirty_memory[i] =
1317 bitmap_zero_extend(ram_list.dirty_memory[i],
1318 old_ram_size, new_ram_size);
1319 }
2152f5ca 1320 }
e1c57ab8 1321 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1322
e1c57ab8
PB
1323 qemu_ram_setup_dump(new_block->host, new_block->length);
1324 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1325 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1326
e1c57ab8
PB
1327 if (kvm_enabled()) {
1328 kvm_setup_guest_memory(new_block->host, new_block->length);
1329 }
6f0437e8 1330
94a6b54f
PB
1331 return new_block->offset;
1332}
e9a1ab19 1333
0b183fc8 1334#ifdef __linux__
e1c57ab8 1335ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1336 bool share, const char *mem_path,
7f56e740 1337 Error **errp)
e1c57ab8
PB
1338{
1339 RAMBlock *new_block;
ef701d7b
HT
1340 ram_addr_t addr;
1341 Error *local_err = NULL;
e1c57ab8
PB
1342
1343 if (xen_enabled()) {
7f56e740
PB
1344 error_setg(errp, "-mem-path not supported with Xen");
1345 return -1;
e1c57ab8
PB
1346 }
1347
1348 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1349 /*
1350 * file_ram_alloc() needs to allocate just like
1351 * phys_mem_alloc, but we haven't bothered to provide
1352 * a hook there.
1353 */
7f56e740
PB
1354 error_setg(errp,
1355 "-mem-path not supported with this accelerator");
1356 return -1;
e1c57ab8
PB
1357 }
1358
1359 size = TARGET_PAGE_ALIGN(size);
1360 new_block = g_malloc0(sizeof(*new_block));
1361 new_block->mr = mr;
1362 new_block->length = size;
dbcb8981 1363 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1364 new_block->host = file_ram_alloc(new_block, size,
1365 mem_path, errp);
1366 if (!new_block->host) {
1367 g_free(new_block);
1368 return -1;
1369 }
1370
ef701d7b
HT
1371 addr = ram_block_add(new_block, &local_err);
1372 if (local_err) {
1373 g_free(new_block);
1374 error_propagate(errp, local_err);
1375 return -1;
1376 }
1377 return addr;
e1c57ab8 1378}
0b183fc8 1379#endif
e1c57ab8
PB
1380
1381ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ef701d7b 1382 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1383{
1384 RAMBlock *new_block;
ef701d7b
HT
1385 ram_addr_t addr;
1386 Error *local_err = NULL;
e1c57ab8
PB
1387
1388 size = TARGET_PAGE_ALIGN(size);
1389 new_block = g_malloc0(sizeof(*new_block));
1390 new_block->mr = mr;
1391 new_block->length = size;
1392 new_block->fd = -1;
1393 new_block->host = host;
1394 if (host) {
7bd4f430 1395 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1396 }
ef701d7b
HT
1397 addr = ram_block_add(new_block, &local_err);
1398 if (local_err) {
1399 g_free(new_block);
1400 error_propagate(errp, local_err);
1401 return -1;
1402 }
1403 return addr;
e1c57ab8
PB
1404}
1405
ef701d7b 1406ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1407{
ef701d7b 1408 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
6977dfe6
YT
1409}
1410
1f2e98b6
AW
1411void qemu_ram_free_from_ptr(ram_addr_t addr)
1412{
1413 RAMBlock *block;
1414
b2a8658e
UD
1415 /* This assumes the iothread lock is taken here too. */
1416 qemu_mutex_lock_ramlist();
a3161038 1417 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1418 if (addr == block->offset) {
a3161038 1419 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1420 ram_list.mru_block = NULL;
f798b07f 1421 ram_list.version++;
7267c094 1422 g_free(block);
b2a8658e 1423 break;
1f2e98b6
AW
1424 }
1425 }
b2a8658e 1426 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1427}
1428
c227f099 1429void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1430{
04b16653
AW
1431 RAMBlock *block;
1432
b2a8658e
UD
1433 /* This assumes the iothread lock is taken here too. */
1434 qemu_mutex_lock_ramlist();
a3161038 1435 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1436 if (addr == block->offset) {
a3161038 1437 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1438 ram_list.mru_block = NULL;
f798b07f 1439 ram_list.version++;
7bd4f430 1440 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1441 ;
dfeaf2ab
MA
1442 } else if (xen_enabled()) {
1443 xen_invalidate_map_cache_entry(block->host);
089f3f76 1444#ifndef _WIN32
3435f395
MA
1445 } else if (block->fd >= 0) {
1446 munmap(block->host, block->length);
1447 close(block->fd);
089f3f76 1448#endif
04b16653 1449 } else {
dfeaf2ab 1450 qemu_anon_ram_free(block->host, block->length);
04b16653 1451 }
7267c094 1452 g_free(block);
b2a8658e 1453 break;
04b16653
AW
1454 }
1455 }
b2a8658e 1456 qemu_mutex_unlock_ramlist();
04b16653 1457
e9a1ab19
FB
1458}
1459
cd19cfa2
HY
1460#ifndef _WIN32
1461void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1462{
1463 RAMBlock *block;
1464 ram_addr_t offset;
1465 int flags;
1466 void *area, *vaddr;
1467
a3161038 1468 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1469 offset = addr - block->offset;
1470 if (offset < block->length) {
1471 vaddr = block->host + offset;
7bd4f430 1472 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1473 ;
dfeaf2ab
MA
1474 } else if (xen_enabled()) {
1475 abort();
cd19cfa2
HY
1476 } else {
1477 flags = MAP_FIXED;
1478 munmap(vaddr, length);
3435f395 1479 if (block->fd >= 0) {
dbcb8981
PB
1480 flags |= (block->flags & RAM_SHARED ?
1481 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1482 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1483 flags, block->fd, offset);
cd19cfa2 1484 } else {
2eb9fbaa
MA
1485 /*
1486 * Remap needs to match alloc. Accelerators that
1487 * set phys_mem_alloc never remap. If they did,
1488 * we'd need a remap hook here.
1489 */
1490 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1491
cd19cfa2
HY
1492 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1493 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1494 flags, -1, 0);
cd19cfa2
HY
1495 }
1496 if (area != vaddr) {
f15fbc4b
AP
1497 fprintf(stderr, "Could not remap addr: "
1498 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1499 length, addr);
1500 exit(1);
1501 }
8490fc78 1502 memory_try_enable_merging(vaddr, length);
ddb97f1d 1503 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1504 }
1505 return;
1506 }
1507 }
1508}
1509#endif /* !_WIN32 */
1510
a35ba7be
PB
1511int qemu_get_ram_fd(ram_addr_t addr)
1512{
1513 RAMBlock *block = qemu_get_ram_block(addr);
1514
1515 return block->fd;
1516}
1517
3fd74b84
DM
1518void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1519{
1520 RAMBlock *block = qemu_get_ram_block(addr);
1521
1522 return block->host;
1523}
1524
1b5ec234 1525/* Return a host pointer to ram allocated with qemu_ram_alloc.
1526 With the exception of the softmmu code in this file, this should
1527 only be used for local memory (e.g. video ram) that the device owns,
1528 and knows it isn't going to access beyond the end of the block.
1529
1530 It should not be used for general purpose DMA.
1531 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1532 */
1533void *qemu_get_ram_ptr(ram_addr_t addr)
1534{
1535 RAMBlock *block = qemu_get_ram_block(addr);
1536
0d6d3c87
PB
1537 if (xen_enabled()) {
1538 /* We need to check if the requested address is in the RAM
1539 * because we don't want to map the entire memory in QEMU.
1540 * In that case just map until the end of the page.
1541 */
1542 if (block->offset == 0) {
1543 return xen_map_cache(addr, 0, 0);
1544 } else if (block->host == NULL) {
1545 block->host =
1546 xen_map_cache(block->offset, block->length, 1);
1547 }
1548 }
1549 return block->host + (addr - block->offset);
dc828ca1 1550}
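/* Usage sketch (added, not in the original): a device that allocated its
 * VRAM with qemu_ram_alloc() may keep a host pointer for its own accesses,
 * e.g.  "void *vram = qemu_get_ram_ptr(vram_offset);" where vram_offset is
 * a hypothetical ram_addr_t returned by the allocation.  Guest-driven DMA
 * should still go through cpu_physical_memory_rw() or
 * cpu_physical_memory_map(), as the comment above says.
 */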
1551
38bee5dc
SS
1552/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1553 * but takes a size argument */
cb85f7ab 1554static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1555{
8ab934f9
SS
1556 if (*size == 0) {
1557 return NULL;
1558 }
868bb33f 1559 if (xen_enabled()) {
e41d7c69 1560 return xen_map_cache(addr, *size, 1);
868bb33f 1561 } else {
38bee5dc
SS
1562 RAMBlock *block;
1563
a3161038 1564 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1565 if (addr - block->offset < block->length) {
1566 if (addr - block->offset + *size > block->length)
1567 *size = block->length - addr + block->offset;
1568 return block->host + (addr - block->offset);
1569 }
1570 }
1571
1572 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1573 abort();
38bee5dc
SS
1574 }
1575}
1576
7443b437
PB
1577/* Some of the softmmu routines need to translate from a host pointer
1578 (typically a TLB entry) back to a ram offset. */
1b5ec234 1579MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1580{
94a6b54f
PB
1581 RAMBlock *block;
1582 uint8_t *host = ptr;
1583
868bb33f 1584 if (xen_enabled()) {
e41d7c69 1585 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1586 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1587 }
1588
23887b79
PB
1589 block = ram_list.mru_block;
1590 if (block && block->host && host - block->host < block->length) {
1591 goto found;
1592 }
1593
a3161038 1594 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c 1595 /* This case appears when the block is not mapped. */
1596 if (block->host == NULL) {
1597 continue;
1598 }
f471a17e 1599 if (host - block->host < block->length) {
23887b79 1600 goto found;
f471a17e 1601 }
94a6b54f 1602 }
432d268c 1603
1b5ec234 1604 return NULL;
23887b79
PB
1605
1606found:
1607 *ram_addr = block->offset + (host - block->host);
1b5ec234 1608 return block->mr;
e890261f 1609}
f471a17e 1610
a8170e5e 1611static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1612 uint64_t val, unsigned size)
9fa3e853 1613{
52159192 1614 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1615 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1616 }
0e0df1e2
AK
1617 switch (size) {
1618 case 1:
1619 stb_p(qemu_get_ram_ptr(ram_addr), val);
1620 break;
1621 case 2:
1622 stw_p(qemu_get_ram_ptr(ram_addr), val);
1623 break;
1624 case 4:
1625 stl_p(qemu_get_ram_ptr(ram_addr), val);
1626 break;
1627 default:
1628 abort();
3a7d929e 1629 }
6886867e 1630 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1631 /* we remove the notdirty callback only if the code has been
1632 flushed */
a2cd8c85 1633 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1634 CPUArchState *env = current_cpu->env_ptr;
93afeade 1635 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1636 }
9fa3e853
FB
1637}
1638
b018ddf6
PB
1639static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1640 unsigned size, bool is_write)
1641{
1642 return is_write;
1643}
1644
0e0df1e2 1645static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1646 .write = notdirty_mem_write,
b018ddf6 1647 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1648 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1649};
1650
0f459d16 1651/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1652static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1653{
93afeade
AF
1654 CPUState *cpu = current_cpu;
1655 CPUArchState *env = cpu->env_ptr;
06d55cc1 1656 target_ulong pc, cs_base;
0f459d16 1657 target_ulong vaddr;
a1d1bb31 1658 CPUWatchpoint *wp;
06d55cc1 1659 int cpu_flags;
0f459d16 1660
ff4700b0 1661 if (cpu->watchpoint_hit) {
06d55cc1 1662 /* We re-entered the check after replacing the TB. Now raise
1663 * the debug interrupt so that it will trigger after the
1664 * current instruction. */
93afeade 1665 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1666 return;
1667 }
93afeade 1668 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1669 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1670 if ((vaddr == (wp->vaddr & len_mask) ||
1671 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1672 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1673 if (!cpu->watchpoint_hit) {
1674 cpu->watchpoint_hit = wp;
239c51a5 1675 tb_check_watchpoint(cpu);
6e140f28 1676 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1677 cpu->exception_index = EXCP_DEBUG;
5638d180 1678 cpu_loop_exit(cpu);
6e140f28
AL
1679 } else {
1680 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1681 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1682 cpu_resume_from_signal(cpu, NULL);
6e140f28 1683 }
06d55cc1 1684 }
6e140f28
AL
1685 } else {
1686 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1687 }
1688 }
1689}
1690
6658ffb8
PB
1691/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1692 so these check for a hit then pass through to the normal out-of-line
1693 phys routines. */
a8170e5e 1694static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1695 unsigned size)
6658ffb8 1696{
1ec9b909
AK
1697 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1698 switch (size) {
2c17449b 1699 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1700 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1701 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1702 default: abort();
1703 }
6658ffb8
PB
1704}
1705
a8170e5e 1706static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1707 uint64_t val, unsigned size)
6658ffb8 1708{
1ec9b909
AK
1709 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1710 switch (size) {
67364150 1711 case 1:
db3be60d 1712 stb_phys(&address_space_memory, addr, val);
67364150
MF
1713 break;
1714 case 2:
5ce5944d 1715 stw_phys(&address_space_memory, addr, val);
67364150
MF
1716 break;
1717 case 4:
ab1da857 1718 stl_phys(&address_space_memory, addr, val);
67364150 1719 break;
1ec9b909
AK
1720 default: abort();
1721 }
6658ffb8
PB
1722}
1723
1ec9b909
AK
1724static const MemoryRegionOps watch_mem_ops = {
1725 .read = watch_mem_read,
1726 .write = watch_mem_write,
1727 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1728};
6658ffb8 1729
a8170e5e 1730static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1731 unsigned len)
db7b5426 1732{
acc9d80b
JK
1733 subpage_t *subpage = opaque;
1734 uint8_t buf[4];
791af8c8 1735
db7b5426 1736#if defined(DEBUG_SUBPAGE)
016e9d62 1737 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1738 subpage, len, addr);
db7b5426 1739#endif
acc9d80b
JK
1740 address_space_read(subpage->as, addr + subpage->base, buf, len);
1741 switch (len) {
1742 case 1:
1743 return ldub_p(buf);
1744 case 2:
1745 return lduw_p(buf);
1746 case 4:
1747 return ldl_p(buf);
1748 default:
1749 abort();
1750 }
db7b5426
BS
1751}
1752
a8170e5e 1753static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1754 uint64_t value, unsigned len)
db7b5426 1755{
acc9d80b
JK
1756 subpage_t *subpage = opaque;
1757 uint8_t buf[4];
1758
db7b5426 1759#if defined(DEBUG_SUBPAGE)
016e9d62 1760 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1761 " value %"PRIx64"\n",
1762 __func__, subpage, len, addr, value);
db7b5426 1763#endif
acc9d80b
JK
1764 switch (len) {
1765 case 1:
1766 stb_p(buf, value);
1767 break;
1768 case 2:
1769 stw_p(buf, value);
1770 break;
1771 case 4:
1772 stl_p(buf, value);
1773 break;
1774 default:
1775 abort();
1776 }
1777 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1778}
1779
c353e4cc 1780static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1781 unsigned len, bool is_write)
c353e4cc 1782{
acc9d80b 1783 subpage_t *subpage = opaque;
c353e4cc 1784#if defined(DEBUG_SUBPAGE)
016e9d62 1785 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1786 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1787#endif
1788
acc9d80b 1789 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1790 len, is_write);
c353e4cc
PB
1791}
1792
70c68e44
AK
1793static const MemoryRegionOps subpage_ops = {
1794 .read = subpage_read,
1795 .write = subpage_write,
c353e4cc 1796 .valid.accepts = subpage_accepts,
70c68e44 1797 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1798};
1799
c227f099 1800static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1801 uint16_t section)
db7b5426
BS
1802{
1803 int idx, eidx;
1804
1805 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1806 return -1;
1807 idx = SUBPAGE_IDX(start);
1808 eidx = SUBPAGE_IDX(end);
1809#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1810 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1811 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1812#endif
db7b5426 1813 for (; idx <= eidx; idx++) {
5312bd8b 1814 mmio->sub_section[idx] = section;
db7b5426
BS
1815 }
1816
1817 return 0;
1818}
1819
acc9d80b 1820static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1821{
c227f099 1822 subpage_t *mmio;
db7b5426 1823
7267c094 1824 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1825
acc9d80b 1826 mmio->as = as;
1eec614b 1827 mmio->base = base;
2c9b15ca 1828 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1829 NULL, TARGET_PAGE_SIZE);
b3b00c78 1830 mmio->iomem.subpage = true;
db7b5426 1831#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1832 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1833 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1834#endif
b41aac4f 1835 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1836
1837 return mmio;
1838}
1839
a656e22f
PC
1840static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1841 MemoryRegion *mr)
5312bd8b 1842{
a656e22f 1843 assert(as);
5312bd8b 1844 MemoryRegionSection section = {
a656e22f 1845 .address_space = as,
5312bd8b
AK
1846 .mr = mr,
1847 .offset_within_address_space = 0,
1848 .offset_within_region = 0,
052e87b0 1849 .size = int128_2_64(),
5312bd8b
AK
1850 };
1851
53cb28cb 1852 return phys_section_add(map, &section);
5312bd8b
AK
1853}
1854
77717094 1855MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1856{
77717094 1857 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1858}
1859
e9179ce1
AK
1860static void io_mem_init(void)
1861{
1f6245e5 1862 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1863 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1864 NULL, UINT64_MAX);
2c9b15ca 1865 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1866 NULL, UINT64_MAX);
2c9b15ca 1867 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1868 NULL, UINT64_MAX);
e9179ce1
AK
1869}
1870
ac1970fb 1871static void mem_begin(MemoryListener *listener)
00752703
PB
1872{
1873 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1874 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1875 uint16_t n;
1876
a656e22f 1877 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1878 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1879 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1880 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1881 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1882 assert(n == PHYS_SECTION_ROM);
a656e22f 1883 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1884 assert(n == PHYS_SECTION_WATCH);
00752703 1885
9736e55b 1886 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1887 d->as = as;
1888 as->next_dispatch = d;
1889}
1890
1891static void mem_commit(MemoryListener *listener)
ac1970fb 1892{
89ae337a 1893 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1894 AddressSpaceDispatch *cur = as->dispatch;
1895 AddressSpaceDispatch *next = as->next_dispatch;
1896
53cb28cb 1897 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1898
0475d94f 1899 as->dispatch = next;
b41aac4f 1900
53cb28cb
MA
1901 if (cur) {
1902 phys_sections_free(&cur->map);
1903 g_free(cur);
1904 }
9affd6fc
PB
1905}
1906
1d71148e 1907static void tcg_commit(MemoryListener *listener)
50c1e149 1908{
182735ef 1909 CPUState *cpu;
117712c3
AK
1910
 1911 /* Since each CPU stores RAM addresses in its TLB cache, we must
 1912 reset the modified entries. */
 1913 /* XXX: slow! */
bdc44640 1914 CPU_FOREACH(cpu) {
33bde2e1
EI
1915 /* FIXME: Disentangle the cpu.h circular files deps so we can
1916 directly get the right CPU from listener. */
1917 if (cpu->tcg_as_listener != listener) {
1918 continue;
1919 }
00c8cb0a 1920 tlb_flush(cpu, 1);
117712c3 1921 }
50c1e149
AK
1922}
1923
93632747
AK
1924static void core_log_global_start(MemoryListener *listener)
1925{
981fdf23 1926 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1927}
1928
1929static void core_log_global_stop(MemoryListener *listener)
1930{
981fdf23 1931 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1932}
1933
93632747 1934static MemoryListener core_memory_listener = {
93632747
AK
1935 .log_global_start = core_log_global_start,
1936 .log_global_stop = core_log_global_stop,
ac1970fb 1937 .priority = 1,
93632747
AK
1938};
1939
ac1970fb
AK
1940void address_space_init_dispatch(AddressSpace *as)
1941{
00752703 1942 as->dispatch = NULL;
89ae337a 1943 as->dispatch_listener = (MemoryListener) {
ac1970fb 1944 .begin = mem_begin,
00752703 1945 .commit = mem_commit,
ac1970fb
AK
1946 .region_add = mem_add,
1947 .region_nop = mem_add,
1948 .priority = 0,
1949 };
89ae337a 1950 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1951}
1952
83f3c251
AK
1953void address_space_destroy_dispatch(AddressSpace *as)
1954{
1955 AddressSpaceDispatch *d = as->dispatch;
1956
89ae337a 1957 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1958 g_free(d);
1959 as->dispatch = NULL;
1960}
1961
62152b8a
AK
1962static void memory_map_init(void)
1963{
7267c094 1964 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1965
57271d63 1966 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1967 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1968
7267c094 1969 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1970 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1971 65536);
7dca8043 1972 address_space_init(&address_space_io, system_io, "I/O");
93632747 1973
f6790af6 1974 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1975}
1976
1977MemoryRegion *get_system_memory(void)
1978{
1979 return system_memory;
1980}
1981
309cb471
AK
1982MemoryRegion *get_system_io(void)
1983{
1984 return system_io;
1985}
1986
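/* A hedged sketch (not part of exec.c) of how a device model plugs an MMIO
 * region into the tree returned by get_system_memory().  The device, its
 * single register, its name and the base address 0xfe000000 are invented for
 * illustration; the calls mirror the memory_region_init_io() pattern used by
 * io_mem_init() above, plus memory_region_add_subregion().  Meant to be
 * built inside the QEMU tree; the include list is indicative.
 */
#include "exec/memory.h"
#include "exec/address-spaces.h"

typedef struct DemoDev {
    MemoryRegion iomem;
    uint32_t reg;
} DemoDev;

static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    DemoDev *d = opaque;
    return d->reg;                       /* every offset reads the register */
}

static void demo_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                            unsigned size)
{
    DemoDev *d = opaque;
    d->reg = val;
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void demo_dev_realize(DemoDev *d)
{
    memory_region_init_io(&d->iomem, NULL, &demo_mmio_ops, d,
                          "demo-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &d->iomem);
}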
e2eef170
PB
1987#endif /* !defined(CONFIG_USER_ONLY) */
1988
13eb76e0
FB
1989/* physical memory access (slow version, mainly for debug) */
1990#if defined(CONFIG_USER_ONLY)
f17ec444 1991int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1992 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1993{
1994 int l, flags;
1995 target_ulong page;
53a5960a 1996 void * p;
13eb76e0
FB
1997
1998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
2003 flags = page_get_flags(page);
2004 if (!(flags & PAGE_VALID))
a68fe89c 2005 return -1;
13eb76e0
FB
2006 if (is_write) {
2007 if (!(flags & PAGE_WRITE))
a68fe89c 2008 return -1;
579a97f7 2009 /* XXX: this code should not depend on lock_user */
72fb7daa 2010 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2011 return -1;
72fb7daa
AJ
2012 memcpy(p, buf, l);
2013 unlock_user(p, addr, l);
13eb76e0
FB
2014 } else {
2015 if (!(flags & PAGE_READ))
a68fe89c 2016 return -1;
579a97f7 2017 /* XXX: this code should not depend on lock_user */
72fb7daa 2018 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2019 return -1;
72fb7daa 2020 memcpy(buf, p, l);
5b257578 2021 unlock_user(p, addr, 0);
13eb76e0
FB
2022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
a68fe89c 2027 return 0;
13eb76e0 2028}
8df1cd07 2029
13eb76e0 2030#else
51d7a9eb 2031
a8170e5e
AK
2032static void invalidate_and_set_dirty(hwaddr addr,
2033 hwaddr length)
51d7a9eb 2034{
a2cd8c85 2035 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
2036 /* invalidate code */
2037 tb_invalidate_phys_page_range(addr, addr + length, 0);
2038 /* set dirty bit */
6886867e 2039 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2040 }
e226939d 2041 xen_modified_memory(addr, length);
51d7a9eb
AP
2042}
2043
23326164 2044static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2045{
e1622f4b 2046 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2047
2048 /* Regions are assumed to support 1-4 byte accesses unless
2049 otherwise specified. */
23326164
RH
2050 if (access_size_max == 0) {
2051 access_size_max = 4;
2052 }
2053
2054 /* Bound the maximum access by the alignment of the address. */
2055 if (!mr->ops->impl.unaligned) {
2056 unsigned align_size_max = addr & -addr;
2057 if (align_size_max != 0 && align_size_max < access_size_max) {
2058 access_size_max = align_size_max;
2059 }
82f2563f 2060 }
23326164
RH
2061
2062 /* Don't attempt accesses larger than the maximum. */
2063 if (l > access_size_max) {
2064 l = access_size_max;
82f2563f 2065 }
098178f2
PB
2066 if (l & (l - 1)) {
2067 l = 1 << (qemu_fls(l) - 1);
2068 }
23326164
RH
2069
2070 return l;
82f2563f
PB
2071}
2072
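/* A standalone sketch (not part of exec.c) of the clamping performed by
 * memory_access_size() above: bound the access by the region's declared
 * maximum, then by the natural alignment of the address, then round down to
 * a power of two.  qemu_fls() is replaced by a bit-clearing loop here.
 */
#include <stdio.h>

static unsigned demo_clamp_access(unsigned l, unsigned max_access_size,
                                  unsigned long long addr, int unaligned_ok)
{
    unsigned access_size_max = max_access_size ? max_access_size : 4;

    if (!unaligned_ok) {
        unsigned long long align = addr & -addr;   /* lowest set bit of addr */
        if (align != 0 && align < access_size_max) {
            access_size_max = align;
        }
    }
    if (l > access_size_max) {
        l = access_size_max;
    }
    while (l & (l - 1)) {          /* keep only the highest set bit */
        l &= l - 1;
    }
    return l;
}

int main(void)
{
    /* a 7-byte request at address 0x1002 against a 4-byte region: the
       alignment of 0x1002 limits it, so a single 2-byte access results */
    printf("%u\n", demo_clamp_access(7, 4, 0x1002, 0));
    return 0;
}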
fd8aaa76 2073bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2074 int len, bool is_write)
13eb76e0 2075{
149f54b5 2076 hwaddr l;
13eb76e0 2077 uint8_t *ptr;
791af8c8 2078 uint64_t val;
149f54b5 2079 hwaddr addr1;
5c8a00ce 2080 MemoryRegion *mr;
fd8aaa76 2081 bool error = false;
3b46e624 2082
13eb76e0 2083 while (len > 0) {
149f54b5 2084 l = len;
5c8a00ce 2085 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2086
13eb76e0 2087 if (is_write) {
5c8a00ce
PB
2088 if (!memory_access_is_direct(mr, is_write)) {
2089 l = memory_access_size(mr, l, addr1);
4917cf44 2090 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2091 potential bugs */
23326164
RH
2092 switch (l) {
2093 case 8:
2094 /* 64 bit write access */
2095 val = ldq_p(buf);
2096 error |= io_mem_write(mr, addr1, val, 8);
2097 break;
2098 case 4:
1c213d19 2099 /* 32 bit write access */
c27004ec 2100 val = ldl_p(buf);
5c8a00ce 2101 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2102 break;
2103 case 2:
1c213d19 2104 /* 16 bit write access */
c27004ec 2105 val = lduw_p(buf);
5c8a00ce 2106 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2107 break;
2108 case 1:
1c213d19 2109 /* 8 bit write access */
c27004ec 2110 val = ldub_p(buf);
5c8a00ce 2111 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2112 break;
2113 default:
2114 abort();
13eb76e0 2115 }
2bbfa05d 2116 } else {
5c8a00ce 2117 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2118 /* RAM case */
5579c7f3 2119 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2120 memcpy(ptr, buf, l);
51d7a9eb 2121 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2122 }
2123 } else {
5c8a00ce 2124 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2125 /* I/O case */
5c8a00ce 2126 l = memory_access_size(mr, l, addr1);
23326164
RH
2127 switch (l) {
2128 case 8:
2129 /* 64 bit read access */
2130 error |= io_mem_read(mr, addr1, &val, 8);
2131 stq_p(buf, val);
2132 break;
2133 case 4:
13eb76e0 2134 /* 32 bit read access */
5c8a00ce 2135 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2136 stl_p(buf, val);
23326164
RH
2137 break;
2138 case 2:
13eb76e0 2139 /* 16 bit read access */
5c8a00ce 2140 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2141 stw_p(buf, val);
23326164
RH
2142 break;
2143 case 1:
1c213d19 2144 /* 8 bit read access */
5c8a00ce 2145 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2146 stb_p(buf, val);
23326164
RH
2147 break;
2148 default:
2149 abort();
13eb76e0
FB
2150 }
2151 } else {
2152 /* RAM case */
5c8a00ce 2153 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2154 memcpy(buf, ptr, l);
13eb76e0
FB
2155 }
2156 }
2157 len -= l;
2158 buf += l;
2159 addr += l;
2160 }
fd8aaa76
PB
2161
2162 return error;
13eb76e0 2163}
8df1cd07 2164
fd8aaa76 2165bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2166 const uint8_t *buf, int len)
2167{
fd8aaa76 2168 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2169}
2170
fd8aaa76 2171bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2172{
fd8aaa76 2173 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2174}
2175
2176
a8170e5e 2177void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2178 int len, int is_write)
2179{
fd8aaa76 2180 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2181}
2182
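/* A usage sketch (not part of exec.c) for the slow-path accessors defined
 * above: a device model copying a buffer into guest physical memory and
 * reading it back.  The guest address 0x100000 is invented; a real device
 * would take it from a DMA descriptor.  Meant to be built inside the QEMU
 * tree; the include list is indicative.
 */
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void demo_dma_roundtrip(void)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    /* write 16 bytes into guest memory (is_write != 0), then read back */
    cpu_physical_memory_rw(0x100000, out, sizeof(out), 1);
    cpu_physical_memory_rw(0x100000, in, sizeof(in), 0);

    /* the AddressSpace variant reports whether any part of the access
       hit an unassigned or erroring region */
    if (address_space_read(&address_space_memory, 0x100000, in, sizeof(in))) {
        /* handle the failed access */
    }
}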
582b55a9
AG
2183enum write_rom_type {
2184 WRITE_DATA,
2185 FLUSH_CACHE,
2186};
2187
2a221651 2188static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2189 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2190{
149f54b5 2191 hwaddr l;
d0ecd2aa 2192 uint8_t *ptr;
149f54b5 2193 hwaddr addr1;
5c8a00ce 2194 MemoryRegion *mr;
3b46e624 2195
d0ecd2aa 2196 while (len > 0) {
149f54b5 2197 l = len;
2a221651 2198 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2199
5c8a00ce
PB
2200 if (!(memory_region_is_ram(mr) ||
2201 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2202 /* do nothing */
2203 } else {
5c8a00ce 2204 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2205 /* ROM/RAM case */
5579c7f3 2206 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2207 switch (type) {
2208 case WRITE_DATA:
2209 memcpy(ptr, buf, l);
2210 invalidate_and_set_dirty(addr1, l);
2211 break;
2212 case FLUSH_CACHE:
2213 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2214 break;
2215 }
d0ecd2aa
FB
2216 }
2217 len -= l;
2218 buf += l;
2219 addr += l;
2220 }
2221}
2222
582b55a9 2223/* used for ROM loading: can write to both RAM and ROM */
2a221651 2224void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2225 const uint8_t *buf, int len)
2226{
2a221651 2227 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2228}
2229
2230void cpu_flush_icache_range(hwaddr start, int len)
2231{
2232 /*
2233 * This function should do the same thing as an icache flush that was
2234 * triggered from within the guest. For TCG we are always cache coherent,
2235 * so there is no need to flush anything. For KVM / Xen we need to flush
2236 * the host's instruction cache at least.
2237 */
2238 if (tcg_enabled()) {
2239 return;
2240 }
2241
2a221651
EI
2242 cpu_physical_memory_write_rom_internal(&address_space_memory,
2243 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2244}
2245
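/* A sketch (not part of exec.c) of the firmware-loading pattern these
 * helpers serve: cpu_physical_memory_write_rom() bypasses the read-only
 * protection of ROM-backed regions, and the explicit icache flush only
 * matters for KVM/Xen, as the comment above explains.  The blob and load
 * address come from the (hypothetical) caller.
 */
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void demo_load_firmware(const uint8_t *blob, int size, hwaddr load_addr)
{
    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  blob, size);
    cpu_flush_icache_range(load_addr, size);
}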
6d16c2f8 2246typedef struct {
d3e71559 2247 MemoryRegion *mr;
6d16c2f8 2248 void *buffer;
a8170e5e
AK
2249 hwaddr addr;
2250 hwaddr len;
6d16c2f8
AL
2251} BounceBuffer;
2252
2253static BounceBuffer bounce;
2254
ba223c29
AL
2255typedef struct MapClient {
2256 void *opaque;
2257 void (*callback)(void *opaque);
72cf2d4f 2258 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2259} MapClient;
2260
72cf2d4f
BS
2261static QLIST_HEAD(map_client_list, MapClient) map_client_list
2262 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2263
2264void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2265{
7267c094 2266 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2267
2268 client->opaque = opaque;
2269 client->callback = callback;
72cf2d4f 2270 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2271 return client;
2272}
2273
8b9c99d9 2274static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2275{
2276 MapClient *client = (MapClient *)_client;
2277
72cf2d4f 2278 QLIST_REMOVE(client, link);
7267c094 2279 g_free(client);
ba223c29
AL
2280}
2281
2282static void cpu_notify_map_clients(void)
2283{
2284 MapClient *client;
2285
72cf2d4f
BS
2286 while (!QLIST_EMPTY(&map_client_list)) {
2287 client = QLIST_FIRST(&map_client_list);
ba223c29 2288 client->callback(client->opaque);
34d5e948 2289 cpu_unregister_map_client(client);
ba223c29
AL
2290 }
2291}
2292
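/* A sketch (not part of exec.c) of the retry protocol behind the map-client
 * list: when address_space_map() (defined below) returns NULL because the
 * single bounce buffer is busy, the caller registers a callback and retries
 * once another mapping is torn down and cpu_notify_map_clients() fires.
 * DemoMapState and demo_try_map() are hypothetical.
 */
#include "exec/memory.h"
#include "exec/address-spaces.h"

typedef struct DemoMapState {
    hwaddr addr;
    hwaddr len;
    void *host;
} DemoMapState;

static void demo_map_retry(void *opaque);

static bool demo_try_map(DemoMapState *s)
{
    hwaddr len = s->len;

    s->host = address_space_map(&address_space_memory, s->addr, &len, true);
    if (!s->host) {
        /* bounce buffer in use: ask to be called back when it is released */
        cpu_register_map_client(s, demo_map_retry);
        return false;
    }
    s->len = len;                        /* may be shorter than requested */
    return true;
}

static void demo_map_retry(void *opaque)
{
    demo_try_map(opaque);
}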
51644ab7
PB
2293bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2294{
5c8a00ce 2295 MemoryRegion *mr;
51644ab7
PB
2296 hwaddr l, xlat;
2297
2298 while (len > 0) {
2299 l = len;
5c8a00ce
PB
2300 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2301 if (!memory_access_is_direct(mr, is_write)) {
2302 l = memory_access_size(mr, l, addr);
2303 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2304 return false;
2305 }
2306 }
2307
2308 len -= l;
2309 addr += l;
2310 }
2311 return true;
2312}
2313
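/* A short sketch (not part of exec.c): probing whether a guest-supplied
 * physical range is fully backed before committing to an access, e.g. when
 * validating a descriptor ring.  Address and length are assumed to come
 * from the caller.
 */
static bool demo_range_readable(hwaddr addr, int len)
{
    return address_space_access_valid(&address_space_memory, addr, len,
                                      false /* is_write */);
}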
6d16c2f8
AL
2314/* Map a physical memory region into a host virtual address.
2315 * May map a subset of the requested range, given by and returned in *plen.
2316 * May return NULL if resources needed to perform the mapping are exhausted.
2317 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2318 * Use cpu_register_map_client() to know when retrying the map operation is
2319 * likely to succeed.
6d16c2f8 2320 */
ac1970fb 2321void *address_space_map(AddressSpace *as,
a8170e5e
AK
2322 hwaddr addr,
2323 hwaddr *plen,
ac1970fb 2324 bool is_write)
6d16c2f8 2325{
a8170e5e 2326 hwaddr len = *plen;
e3127ae0
PB
2327 hwaddr done = 0;
2328 hwaddr l, xlat, base;
2329 MemoryRegion *mr, *this_mr;
2330 ram_addr_t raddr;
6d16c2f8 2331
e3127ae0
PB
2332 if (len == 0) {
2333 return NULL;
2334 }
38bee5dc 2335
e3127ae0
PB
2336 l = len;
2337 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2338 if (!memory_access_is_direct(mr, is_write)) {
2339 if (bounce.buffer) {
2340 return NULL;
6d16c2f8 2341 }
e85d9db5
KW
2342 /* Avoid unbounded allocations */
2343 l = MIN(l, TARGET_PAGE_SIZE);
2344 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2345 bounce.addr = addr;
2346 bounce.len = l;
d3e71559
PB
2347
2348 memory_region_ref(mr);
2349 bounce.mr = mr;
e3127ae0
PB
2350 if (!is_write) {
2351 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2352 }
6d16c2f8 2353
e3127ae0
PB
2354 *plen = l;
2355 return bounce.buffer;
2356 }
2357
2358 base = xlat;
2359 raddr = memory_region_get_ram_addr(mr);
2360
2361 for (;;) {
6d16c2f8
AL
2362 len -= l;
2363 addr += l;
e3127ae0
PB
2364 done += l;
2365 if (len == 0) {
2366 break;
2367 }
2368
2369 l = len;
2370 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2371 if (this_mr != mr || xlat != base + done) {
2372 break;
2373 }
6d16c2f8 2374 }
e3127ae0 2375
d3e71559 2376 memory_region_ref(mr);
e3127ae0
PB
2377 *plen = done;
2378 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2379}
2380
ac1970fb 2381/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2382 * Will also mark the memory as dirty if is_write == 1. access_len gives
2383 * the amount of memory that was actually read or written by the caller.
2384 */
a8170e5e
AK
2385void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2386 int is_write, hwaddr access_len)
6d16c2f8
AL
2387{
2388 if (buffer != bounce.buffer) {
d3e71559
PB
2389 MemoryRegion *mr;
2390 ram_addr_t addr1;
2391
2392 mr = qemu_ram_addr_from_host(buffer, &addr1);
2393 assert(mr != NULL);
6d16c2f8 2394 if (is_write) {
6886867e 2395 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2396 }
868bb33f 2397 if (xen_enabled()) {
e41d7c69 2398 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2399 }
d3e71559 2400 memory_region_unref(mr);
6d16c2f8
AL
2401 return;
2402 }
2403 if (is_write) {
ac1970fb 2404 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2405 }
f8a83245 2406 qemu_vfree(bounce.buffer);
6d16c2f8 2407 bounce.buffer = NULL;
d3e71559 2408 memory_region_unref(bounce.mr);
ba223c29 2409 cpu_notify_map_clients();
6d16c2f8 2410}
d0ecd2aa 2411
a8170e5e
AK
2412void *cpu_physical_memory_map(hwaddr addr,
2413 hwaddr *plen,
ac1970fb
AK
2414 int is_write)
2415{
2416 return address_space_map(&address_space_memory, addr, plen, is_write);
2417}
2418
a8170e5e
AK
2419void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2420 int is_write, hwaddr access_len)
ac1970fb
AK
2421{
2422 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2423}
2424
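/* A sketch (not part of exec.c) of the zero-copy DMA pattern that
 * address_space_map()/address_space_unmap() implement: map as much of the
 * range as is contiguous, touch it directly in host memory, then unmap,
 * reporting how many bytes were really written so dirty tracking stays
 * correct.  The fill value and the chunked loop are illustrative.
 */
#include <string.h>
#include "exec/memory.h"
#include "exec/address-spaces.h"

static void demo_zero_guest_buffer(hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr chunk = len;
        void *host = address_space_map(&address_space_memory, addr,
                                       &chunk, true /* is_write */);
        if (!host) {
            /* bounce buffer busy; see cpu_register_map_client() above */
            break;
        }
        memset(host, 0, chunk);          /* direct access to guest RAM */
        address_space_unmap(&address_space_memory, host, chunk,
                            1 /* is_write */, chunk /* access_len */);
        addr += chunk;
        len -= chunk;
    }
}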
8df1cd07 2425/* warning: addr must be aligned */
fdfba1a2 2426static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2427 enum device_endian endian)
8df1cd07 2428{
8df1cd07 2429 uint8_t *ptr;
791af8c8 2430 uint64_t val;
5c8a00ce 2431 MemoryRegion *mr;
149f54b5
PB
2432 hwaddr l = 4;
2433 hwaddr addr1;
8df1cd07 2434
fdfba1a2 2435 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2436 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2437 /* I/O case */
5c8a00ce 2438 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2439#if defined(TARGET_WORDS_BIGENDIAN)
2440 if (endian == DEVICE_LITTLE_ENDIAN) {
2441 val = bswap32(val);
2442 }
2443#else
2444 if (endian == DEVICE_BIG_ENDIAN) {
2445 val = bswap32(val);
2446 }
2447#endif
8df1cd07
FB
2448 } else {
2449 /* RAM case */
5c8a00ce 2450 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2451 & TARGET_PAGE_MASK)
149f54b5 2452 + addr1);
1e78bcc1
AG
2453 switch (endian) {
2454 case DEVICE_LITTLE_ENDIAN:
2455 val = ldl_le_p(ptr);
2456 break;
2457 case DEVICE_BIG_ENDIAN:
2458 val = ldl_be_p(ptr);
2459 break;
2460 default:
2461 val = ldl_p(ptr);
2462 break;
2463 }
8df1cd07
FB
2464 }
2465 return val;
2466}
2467
fdfba1a2 2468uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2469{
fdfba1a2 2470 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2471}
2472
fdfba1a2 2473uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2474{
fdfba1a2 2475 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2476}
2477
fdfba1a2 2478uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2479{
fdfba1a2 2480 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2481}
2482
84b7b8e7 2483/* warning: addr must be aligned */
2c17449b 2484static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2485 enum device_endian endian)
84b7b8e7 2486{
84b7b8e7
FB
2487 uint8_t *ptr;
2488 uint64_t val;
5c8a00ce 2489 MemoryRegion *mr;
149f54b5
PB
2490 hwaddr l = 8;
2491 hwaddr addr1;
84b7b8e7 2492
2c17449b 2493 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2494 false);
2495 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2496 /* I/O case */
5c8a00ce 2497 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2498#if defined(TARGET_WORDS_BIGENDIAN)
2499 if (endian == DEVICE_LITTLE_ENDIAN) {
2500 val = bswap64(val);
2501 }
2502#else
2503 if (endian == DEVICE_BIG_ENDIAN) {
2504 val = bswap64(val);
2505 }
84b7b8e7
FB
2506#endif
2507 } else {
2508 /* RAM case */
5c8a00ce 2509 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2510 & TARGET_PAGE_MASK)
149f54b5 2511 + addr1);
1e78bcc1
AG
2512 switch (endian) {
2513 case DEVICE_LITTLE_ENDIAN:
2514 val = ldq_le_p(ptr);
2515 break;
2516 case DEVICE_BIG_ENDIAN:
2517 val = ldq_be_p(ptr);
2518 break;
2519 default:
2520 val = ldq_p(ptr);
2521 break;
2522 }
84b7b8e7
FB
2523 }
2524 return val;
2525}
2526
2c17449b 2527uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2528{
2c17449b 2529 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2530}
2531
2c17449b 2532uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2533{
2c17449b 2534 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2535}
2536
2c17449b 2537uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2538{
2c17449b 2539 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2540}
2541
aab33094 2542/* XXX: optimize */
2c17449b 2543uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2544{
2545 uint8_t val;
2c17449b 2546 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2547 return val;
2548}
2549
733f0b02 2550/* warning: addr must be aligned */
41701aa4 2551static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2552 enum device_endian endian)
aab33094 2553{
733f0b02
MT
2554 uint8_t *ptr;
2555 uint64_t val;
5c8a00ce 2556 MemoryRegion *mr;
149f54b5
PB
2557 hwaddr l = 2;
2558 hwaddr addr1;
733f0b02 2559
41701aa4 2560 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2561 false);
2562 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2563 /* I/O case */
5c8a00ce 2564 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2565#if defined(TARGET_WORDS_BIGENDIAN)
2566 if (endian == DEVICE_LITTLE_ENDIAN) {
2567 val = bswap16(val);
2568 }
2569#else
2570 if (endian == DEVICE_BIG_ENDIAN) {
2571 val = bswap16(val);
2572 }
2573#endif
733f0b02
MT
2574 } else {
2575 /* RAM case */
5c8a00ce 2576 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2577 & TARGET_PAGE_MASK)
149f54b5 2578 + addr1);
1e78bcc1
AG
2579 switch (endian) {
2580 case DEVICE_LITTLE_ENDIAN:
2581 val = lduw_le_p(ptr);
2582 break;
2583 case DEVICE_BIG_ENDIAN:
2584 val = lduw_be_p(ptr);
2585 break;
2586 default:
2587 val = lduw_p(ptr);
2588 break;
2589 }
733f0b02
MT
2590 }
2591 return val;
aab33094
FB
2592}
2593
41701aa4 2594uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2595{
41701aa4 2596 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2597}
2598
41701aa4 2599uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2600{
41701aa4 2601 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2602}
2603
41701aa4 2604uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2605{
41701aa4 2606 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2607}
2608
8df1cd07
FB
 2609/* warning: addr must be aligned. The RAM page is not marked as dirty
2610 and the code inside is not invalidated. It is useful if the dirty
2611 bits are used to track modified PTEs */
2198a121 2612void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2613{
8df1cd07 2614 uint8_t *ptr;
5c8a00ce 2615 MemoryRegion *mr;
149f54b5
PB
2616 hwaddr l = 4;
2617 hwaddr addr1;
8df1cd07 2618
2198a121 2619 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2620 true);
2621 if (l < 4 || !memory_access_is_direct(mr, true)) {
2622 io_mem_write(mr, addr1, val, 4);
8df1cd07 2623 } else {
5c8a00ce 2624 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2625 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2626 stl_p(ptr, val);
74576198
AL
2627
2628 if (unlikely(in_migration)) {
a2cd8c85 2629 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2630 /* invalidate code */
2631 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2632 /* set dirty bit */
6886867e 2633 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2634 }
2635 }
8df1cd07
FB
2636 }
2637}
2638
2639/* warning: addr must be aligned */
ab1da857
EI
2640static inline void stl_phys_internal(AddressSpace *as,
2641 hwaddr addr, uint32_t val,
1e78bcc1 2642 enum device_endian endian)
8df1cd07 2643{
8df1cd07 2644 uint8_t *ptr;
5c8a00ce 2645 MemoryRegion *mr;
149f54b5
PB
2646 hwaddr l = 4;
2647 hwaddr addr1;
8df1cd07 2648
ab1da857 2649 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2650 true);
2651 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2652#if defined(TARGET_WORDS_BIGENDIAN)
2653 if (endian == DEVICE_LITTLE_ENDIAN) {
2654 val = bswap32(val);
2655 }
2656#else
2657 if (endian == DEVICE_BIG_ENDIAN) {
2658 val = bswap32(val);
2659 }
2660#endif
5c8a00ce 2661 io_mem_write(mr, addr1, val, 4);
8df1cd07 2662 } else {
8df1cd07 2663 /* RAM case */
5c8a00ce 2664 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2665 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2666 switch (endian) {
2667 case DEVICE_LITTLE_ENDIAN:
2668 stl_le_p(ptr, val);
2669 break;
2670 case DEVICE_BIG_ENDIAN:
2671 stl_be_p(ptr, val);
2672 break;
2673 default:
2674 stl_p(ptr, val);
2675 break;
2676 }
51d7a9eb 2677 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2678 }
2679}
2680
ab1da857 2681void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2682{
ab1da857 2683 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2684}
2685
ab1da857 2686void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2687{
ab1da857 2688 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2689}
2690
ab1da857 2691void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2692{
ab1da857 2693 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2694}
2695
aab33094 2696/* XXX: optimize */
db3be60d 2697void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2698{
2699 uint8_t v = val;
db3be60d 2700 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2701}
2702
733f0b02 2703/* warning: addr must be aligned */
5ce5944d
EI
2704static inline void stw_phys_internal(AddressSpace *as,
2705 hwaddr addr, uint32_t val,
1e78bcc1 2706 enum device_endian endian)
aab33094 2707{
733f0b02 2708 uint8_t *ptr;
5c8a00ce 2709 MemoryRegion *mr;
149f54b5
PB
2710 hwaddr l = 2;
2711 hwaddr addr1;
733f0b02 2712
5ce5944d 2713 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2714 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2715#if defined(TARGET_WORDS_BIGENDIAN)
2716 if (endian == DEVICE_LITTLE_ENDIAN) {
2717 val = bswap16(val);
2718 }
2719#else
2720 if (endian == DEVICE_BIG_ENDIAN) {
2721 val = bswap16(val);
2722 }
2723#endif
5c8a00ce 2724 io_mem_write(mr, addr1, val, 2);
733f0b02 2725 } else {
733f0b02 2726 /* RAM case */
5c8a00ce 2727 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2728 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2729 switch (endian) {
2730 case DEVICE_LITTLE_ENDIAN:
2731 stw_le_p(ptr, val);
2732 break;
2733 case DEVICE_BIG_ENDIAN:
2734 stw_be_p(ptr, val);
2735 break;
2736 default:
2737 stw_p(ptr, val);
2738 break;
2739 }
51d7a9eb 2740 invalidate_and_set_dirty(addr1, 2);
733f0b02 2741 }
aab33094
FB
2742}
2743
5ce5944d 2744void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2745{
5ce5944d 2746 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2747}
2748
5ce5944d 2749void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2750{
5ce5944d 2751 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2752}
2753
5ce5944d 2754void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2755{
5ce5944d 2756 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2757}
2758
aab33094 2759/* XXX: optimize */
f606604f 2760void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2761{
2762 val = tswap64(val);
f606604f 2763 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2764}
2765
f606604f 2766void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2767{
2768 val = cpu_to_le64(val);
f606604f 2769 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2770}
2771
f606604f 2772void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2773{
2774 val = cpu_to_be64(val);
f606604f 2775 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2776}
2777
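/* A sketch (not part of exec.c) using the fixed-width helpers above: read
 * and update a little-endian 32-bit field of a guest-resident descriptor,
 * independent of host byte order.  The descriptor layout (a counter at
 * offset 8) and its address are invented.
 */
static void demo_bump_descriptor_count(hwaddr desc_addr)
{
    uint32_t count = ldl_le_phys(&address_space_memory, desc_addr + 8);
    stl_le_phys(&address_space_memory, desc_addr + 8, count + 1);
}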
5e2972fd 2778/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2779int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2780 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2781{
2782 int l;
a8170e5e 2783 hwaddr phys_addr;
9b3c35e0 2784 target_ulong page;
13eb76e0
FB
2785
2786 while (len > 0) {
2787 page = addr & TARGET_PAGE_MASK;
f17ec444 2788 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2789 /* if no physical page mapped, return an error */
2790 if (phys_addr == -1)
2791 return -1;
2792 l = (page + TARGET_PAGE_SIZE) - addr;
2793 if (l > len)
2794 l = len;
5e2972fd 2795 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2796 if (is_write) {
2797 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2798 } else {
2799 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2800 }
13eb76e0
FB
2801 len -= l;
2802 buf += l;
2803 addr += l;
2804 }
2805 return 0;
2806}
a68fe89c 2807#endif
13eb76e0 2808
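/* A sketch (not part of exec.c) of what a debug front end such as the gdb
 * stub does with cpu_memory_rw_debug(): read guest-virtual memory of a given
 * CPU, tolerating unmapped pages.  The CPU pointer and address are assumed
 * to come from the caller; cpu.h provides CPUState/target_ulong when built
 * per target.
 */
static int demo_read_guest_word(CPUState *cpu, target_ulong vaddr,
                                uint32_t *out)
{
    /* is_write == 0: read; returns -1 if no page is mapped at vaddr */
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out, sizeof(*out), 0);
}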
8e4a424b
BS
2809/*
2810 * A helper function for the _utterly broken_ virtio device model to find out if
 2811 * it's running on a big-endian machine. Don't do this at home, kids!
2812 */
98ed8ecf
GK
2813bool target_words_bigendian(void);
2814bool target_words_bigendian(void)
8e4a424b
BS
2815{
2816#if defined(TARGET_WORDS_BIGENDIAN)
2817 return true;
2818#else
2819 return false;
2820#endif
2821}
2822
76f35538 2823#ifndef CONFIG_USER_ONLY
a8170e5e 2824bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2825{
5c8a00ce 2826 MemoryRegion *mr;
149f54b5 2827 hwaddr l = 1;
76f35538 2828
5c8a00ce
PB
2829 mr = address_space_translate(&address_space_memory,
2830 phys_addr, &phys_addr, &l, false);
76f35538 2831
5c8a00ce
PB
2832 return !(memory_region_is_ram(mr) ||
2833 memory_region_is_romd(mr));
76f35538 2834}
bd2fa51f
MH
2835
2836void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2837{
2838 RAMBlock *block;
2839
2840 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2841 func(block->host, block->offset, block->length, opaque);
2842 }
2843}
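/* A sketch (not part of exec.c): walking every registered RAM block with
 * qemu_ram_foreach_block(), here simply to total the guest RAM.  The
 * callback prototype is assumed to match the call above (host pointer,
 * offset, length, opaque).
 */
static void demo_add_block(void *host, ram_addr_t offset, ram_addr_t length,
                           void *opaque)
{
    uint64_t *total = opaque;
    *total += length;
}

static uint64_t demo_total_guest_ram(void)
{
    uint64_t total = 0;
    qemu_ram_foreach_block(demo_add_block, &total);
    return total;
}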
ec3f8c99 2844#endif