[qemu.git] / exec.c
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004  4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c 22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9 34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f 54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6 66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430 72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981 75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
e2eef170 78#endif
9fa3e853 79
bdc44640 80struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
4917cf44 83DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 84/* 0 = Do not count executed instructions.
bf20dc07 85 1 = Precise instruction counting.
2e70f6ef 86 2 = Adaptive rate instruction counting. */
5708fc66 87int use_icount;
6a00d601 88
e2eef170 89#if !defined(CONFIG_USER_ONLY)
4346ae3e 90
1db8abb1 91typedef struct PhysPageEntry PhysPageEntry;
92
93struct PhysPageEntry {
9736e55b 94 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 95 uint32_t skip : 6;
9736e55b 96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 97 uint32_t ptr : 26;
1db8abb1 98};
99
8b795765 100#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
101
03f49957 102/* Size of the L2 (and L3, etc) page tables. */
57271d63 103#define ADDR_SPACE_BITS 64
03f49957 104
026736ce 105#define P_L2_BITS 9
03f49957 106#define P_L2_SIZE (1 << P_L2_BITS)
107
108#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
109
110typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 111
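/* Example: with 4K target pages (TARGET_PAGE_BITS == 12) these constants give
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, i.e. a physical-address lookup
 * walks at most six 512-entry levels before reaching a MemoryRegionSection.
 */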
53cb28cb 112typedef struct PhysPageMap {
113 unsigned sections_nb;
114 unsigned sections_nb_alloc;
115 unsigned nodes_nb;
116 unsigned nodes_nb_alloc;
117 Node *nodes;
118 MemoryRegionSection *sections;
119} PhysPageMap;
120
1db8abb1 121struct AddressSpaceDispatch {
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
124 */
125 PhysPageEntry phys_map;
53cb28cb 126 PhysPageMap map;
acc9d80b 127 AddressSpace *as;
1db8abb1 128};
129
90260c6c 130#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131typedef struct subpage_t {
132 MemoryRegion iomem;
acc9d80b 133 AddressSpace *as;
90260c6c 134 hwaddr base;
135 uint16_t sub_section[TARGET_PAGE_SIZE];
136} subpage_t;
137
b41aac4f 138#define PHYS_SECTION_UNASSIGNED 0
139#define PHYS_SECTION_NOTDIRTY 1
140#define PHYS_SECTION_ROM 2
141#define PHYS_SECTION_WATCH 3
5312bd8b 142
e2eef170 143static void io_mem_init(void);
62152b8a 144static void memory_map_init(void);
09daed84 145static void tcg_commit(MemoryListener *listener);
e2eef170 146
1ec9b909 147static MemoryRegion io_mem_watch;
6658ffb8 148#endif
fd6ce8f6 149
6d9a1304 150#if !defined(CONFIG_USER_ONLY)
d6f2ea22 151
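/* Make sure the node pool can take at least 'nodes' more entries, growing the
 * backing array geometrically. */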
53cb28cb 152static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 153{
53cb28cb 154 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
155 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
157 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 158 }
f7bf5461 159}
160
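/* Allocate one radix-tree node, with every entry initialized as missing
 * (skip = 1, ptr = PHYS_MAP_NODE_NIL). */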
53cb28cb 161static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461 162{
163 unsigned i;
8b795765 164 uint32_t ret;
f7bf5461 165
53cb28cb 166 ret = map->nodes_nb++;
f7bf5461 167 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 168 assert(ret != map->nodes_nb_alloc);
03f49957 169 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb 170 map->nodes[ret][i].skip = 1;
171 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 172 }
f7bf5461 173 return ret;
d6f2ea22 174}
175
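/* Recursively populate the tree for the page range [*index, *index + *nb):
 * whenever the remaining range covers a whole aligned block at this level,
 * point the entry straight at section 'leaf'; otherwise descend a level. */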
53cb28cb 176static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
177 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 178 int level)
f7bf5461 179{
180 PhysPageEntry *p;
181 int i;
03f49957 182 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 183
9736e55b 184 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb 185 lp->ptr = phys_map_node_alloc(map);
186 p = map->nodes[lp->ptr];
f7bf5461 187 if (level == 0) {
03f49957 188 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 189 p[i].skip = 0;
b41aac4f 190 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 191 }
67c4d23c 192 }
f7bf5461 193 } else {
53cb28cb 194 p = map->nodes[lp->ptr];
92e873b9 195 }
03f49957 196 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 197
03f49957 198 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 199 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 200 lp->skip = 0;
c19e8800 201 lp->ptr = leaf;
07f07b31 202 *index += step;
203 *nb -= step;
2999097b 204 } else {
53cb28cb 205 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 206 }
207 ++lp;
f7bf5461 208 }
209}
210
ac1970fb 211static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 212 hwaddr index, hwaddr nb,
2999097b 213 uint16_t leaf)
f7bf5461 214{
2999097b 215 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 216 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 217
53cb28cb 218 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 219}
220
b35ba30f 221/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
223 */
224static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
225{
226 unsigned valid_ptr = P_L2_SIZE;
227 int valid = 0;
228 PhysPageEntry *p;
229 int i;
230
231 if (lp->ptr == PHYS_MAP_NODE_NIL) {
232 return;
233 }
234
235 p = nodes[lp->ptr];
236 for (i = 0; i < P_L2_SIZE; i++) {
237 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
238 continue;
239 }
240
241 valid_ptr = i;
242 valid++;
243 if (p[i].skip) {
244 phys_page_compact(&p[i], nodes, compacted);
245 }
246 }
247
248 /* We can only compress if there's only one child. */
249 if (valid != 1) {
250 return;
251 }
252
253 assert(valid_ptr < P_L2_SIZE);
254
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
257 return;
258 }
259
260 lp->ptr = p[valid_ptr].ptr;
261 if (!p[valid_ptr].skip) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
266 * change this rule.
267 */
268 lp->skip = 0;
269 } else {
270 lp->skip += p[valid_ptr].skip;
271 }
272}
273
274static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275{
276 DECLARE_BITMAP(compacted, nodes_nb);
277
278 if (d->phys_map.skip) {
53cb28cb 279 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f 280 }
281}
282
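/* Walk the radix tree from 'lp' and return the MemoryRegionSection covering
 * 'addr', or the unassigned section if no mapping covers it. */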
97115a8d 283static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 284 Node *nodes, MemoryRegionSection *sections)
92e873b9 285{
31ab2b4a 286 PhysPageEntry *p;
97115a8d 287 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 288 int i;
f1f6e3b8 289
9736e55b 290 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 291 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 292 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 293 }
9affd6fc 294 p = nodes[lp.ptr];
03f49957 295 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 296 }
b35ba30f 297
298 if (sections[lp.ptr].size.hi ||
299 range_covers_byte(sections[lp.ptr].offset_within_address_space,
300 sections[lp.ptr].size.lo, addr)) {
301 return &sections[lp.ptr];
302 } else {
303 return &sections[PHYS_SECTION_UNASSIGNED];
304 }
f3705d53 305}
306
e5548617 307bool memory_region_is_unassigned(MemoryRegion *mr)
308{
2a8e7499 309 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 310 && mr != &io_mem_watch;
fd6ce8f6 311}
149f54b5 312
c7086b4a 313static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c 314 hwaddr addr,
315 bool resolve_subpage)
9f029603 316{
90260c6c 317 MemoryRegionSection *section;
318 subpage_t *subpage;
319
53cb28cb 320 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c 321 if (resolve_subpage && section->mr->subpage) {
322 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 323 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c 324 }
325 return section;
9f029603 326}
327
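/* Resolve 'addr' through dispatch 'd' to a MemoryRegionSection, returning the
 * offset into its MemoryRegion in *xlat and clamping *plen so the access does
 * not run past the end of the region. */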
90260c6c 328static MemoryRegionSection *
c7086b4a 329address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 330 hwaddr *plen, bool resolve_subpage)
149f54b5 331{
332 MemoryRegionSection *section;
a87f3954 333 Int128 diff;
149f54b5 334
c7086b4a 335 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5 336 /* Compute offset within MemoryRegionSection */
337 addr -= section->offset_within_address_space;
338
339 /* Compute offset within MemoryRegion */
340 *xlat = addr + section->offset_within_region;
341
342 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 343 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5 344 return section;
345}
90260c6c 346
a87f3954 347static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348{
349 if (memory_region_is_ram(mr)) {
350 return !(is_write && mr->readonly);
351 }
352 if (memory_region_is_romd(mr)) {
353 return !is_write;
354 }
355
356 return false;
357}
358
5c8a00ce 359MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
360 hwaddr *xlat, hwaddr *plen,
361 bool is_write)
90260c6c 362{
30951157 363 IOMMUTLBEntry iotlb;
364 MemoryRegionSection *section;
365 MemoryRegion *mr;
366 hwaddr len = *plen;
367
368 for (;;) {
a87f3954 369 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157 370 mr = section->mr;
371
372 if (!mr->iommu_ops) {
373 break;
374 }
375
8d7b8cb9 376 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157 377 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
378 | (addr & iotlb.addr_mask));
379 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
380 if (!(iotlb.perm & (1 << is_write))) {
381 mr = &io_mem_unassigned;
382 break;
383 }
384
385 as = iotlb.target_as;
386 }
387
fe680d0d 388 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954 389 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
390 len = MIN(page, len);
391 }
392
30951157 393 *plen = len;
394 *xlat = addr;
395 return mr;
90260c6c 396}
397
398MemoryRegionSection *
399address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
400 hwaddr *plen)
401{
30951157 402 MemoryRegionSection *section;
c7086b4a 403 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157 404
405 assert(!section->mr->iommu_ops);
406 return section;
90260c6c 407}
5b6dd868 408#endif
fd6ce8f6 409
5b6dd868 410void cpu_exec_init_all(void)
fdbb84d1 411{
5b6dd868 412#if !defined(CONFIG_USER_ONLY)
b2a8658e 413 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
414 memory_map_init();
415 io_mem_init();
fdbb84d1 416#endif
5b6dd868 417}
fdbb84d1 418
b170fce3 419#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
420
421static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 422{
259186a7 423 CPUState *cpu = opaque;
a513fe19 424
5b6dd868
BS
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
259186a7 427 cpu->interrupt_request &= ~0x01;
c01a71c1 428 tlb_flush(cpu, 1);
5b6dd868
BS
429
430 return 0;
a513fe19 431}
7501267e 432
6c3bff0e
PD
433static int cpu_common_pre_load(void *opaque)
434{
435 CPUState *cpu = opaque;
436
437 cpu->exception_index = 0;
438
439 return 0;
440}
441
442static bool cpu_common_exception_index_needed(void *opaque)
443{
444 CPUState *cpu = opaque;
445
446 return cpu->exception_index != 0;
447}
448
449static const VMStateDescription vmstate_cpu_common_exception_index = {
450 .name = "cpu_common/exception_index",
451 .version_id = 1,
452 .minimum_version_id = 1,
453 .fields = (VMStateField[]) {
454 VMSTATE_INT32(exception_index, CPUState),
455 VMSTATE_END_OF_LIST()
456 }
457};
458
1a1562f5 459const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
460 .name = "cpu_common",
461 .version_id = 1,
462 .minimum_version_id = 1,
6c3bff0e 463 .pre_load = cpu_common_pre_load,
5b6dd868 464 .post_load = cpu_common_post_load,
35d08458 465 .fields = (VMStateField[]) {
259186a7
AF
466 VMSTATE_UINT32(halted, CPUState),
467 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 468 VMSTATE_END_OF_LIST()
6c3bff0e
PD
469 },
470 .subsections = (VMStateSubsection[]) {
471 {
472 .vmsd = &vmstate_cpu_common_exception_index,
473 .needed = cpu_common_exception_index_needed,
474 } , {
475 /* empty */
476 }
5b6dd868
BS
477 }
478};
1a1562f5 479
5b6dd868 480#endif
ea041c0e 481
38d8f5c8 482CPUState *qemu_get_cpu(int index)
ea041c0e 483{
bdc44640 484 CPUState *cpu;
ea041c0e 485
bdc44640 486 CPU_FOREACH(cpu) {
55e5c285 487 if (cpu->cpu_index == index) {
bdc44640 488 return cpu;
55e5c285 489 }
ea041c0e 490 }
5b6dd868 491
bdc44640 492 return NULL;
ea041c0e
FB
493}
494
09daed84
EI
495#if !defined(CONFIG_USER_ONLY)
496void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
497{
498 /* We only support one address space per cpu at the moment. */
499 assert(cpu->as == as);
500
501 if (cpu->tcg_as_listener) {
502 memory_listener_unregister(cpu->tcg_as_listener);
503 } else {
504 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
505 }
506 cpu->tcg_as_listener->commit = tcg_commit;
507 memory_listener_register(cpu->tcg_as_listener, as);
508}
509#endif
510
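/* Attach a newly created CPU to the global list: assign the next cpu_index,
 * initialize its breakpoint and watchpoint queues, and register its VMState
 * description(s) so the CPU is included in savevm/migration. */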
5b6dd868 511void cpu_exec_init(CPUArchState *env)
ea041c0e 512{
5b6dd868 513 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 514 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 515 CPUState *some_cpu;
5b6dd868 516 int cpu_index;
517
518#if defined(CONFIG_USER_ONLY)
519 cpu_list_lock();
520#endif
5b6dd868 521 cpu_index = 0;
bdc44640 522 CPU_FOREACH(some_cpu) {
5b6dd868
BS
523 cpu_index++;
524 }
55e5c285 525 cpu->cpu_index = cpu_index;
1b1ed8dc 526 cpu->numa_node = 0;
f0c3c505 527 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 528 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 529#ifndef CONFIG_USER_ONLY
09daed84 530 cpu->as = &address_space_memory;
5b6dd868
BS
531 cpu->thread_id = qemu_get_thread_id();
532#endif
bdc44640 533 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
534#if defined(CONFIG_USER_ONLY)
535 cpu_list_unlock();
536#endif
e0d47944
AF
537 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
538 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
539 }
5b6dd868 540#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
541 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
542 cpu_save, cpu_load, env);
b170fce3 543 assert(cc->vmsd == NULL);
e0d47944 544 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 545#endif
b170fce3
AF
546 if (cc->vmsd != NULL) {
547 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
548 }
ea041c0e
FB
549}
550
1fddef4b 551#if defined(TARGET_HAS_ICE)
94df27fd 552#if defined(CONFIG_USER_ONLY)
00b941e5 553static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
554{
555 tb_invalidate_phys_page_range(pc, pc + 1, 0);
556}
557#else
00b941e5 558static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 559{
e8262a1b
MF
560 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
561 if (phys != -1) {
09daed84 562 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 563 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 564 }
1e7855a5 565}
c27004ec 566#endif
94df27fd 567#endif /* TARGET_HAS_ICE */
d720b93d 568
c527ee8f 569#if defined(CONFIG_USER_ONLY)
75a34036 570void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
571
572{
573}
574
3ee887e8
PM
575int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
576 int flags)
577{
578 return -ENOSYS;
579}
580
581void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
582{
583}
584
75a34036 585int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
586 int flags, CPUWatchpoint **watchpoint)
587{
588 return -ENOSYS;
589}
590#else
6658ffb8 591/* Add a watchpoint. */
75a34036 592int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 593 int flags, CPUWatchpoint **watchpoint)
6658ffb8 594{
c0ce998e 595 CPUWatchpoint *wp;
6658ffb8 596
05068c0d 597 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 598 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
599 error_report("tried to set invalid watchpoint at %"
600 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
601 return -EINVAL;
602 }
7267c094 603 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
604
605 wp->vaddr = addr;
05068c0d 606 wp->len = len;
a1d1bb31
AL
607 wp->flags = flags;
608
2dc9f411 609 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
610 if (flags & BP_GDB) {
611 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
612 } else {
613 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
614 }
6658ffb8 615
31b030d4 616 tlb_flush_page(cpu, addr);
a1d1bb31
AL
617
618 if (watchpoint)
619 *watchpoint = wp;
620 return 0;
6658ffb8
PB
621}
622
a1d1bb31 623/* Remove a specific watchpoint. */
75a34036 624int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 625 int flags)
6658ffb8 626{
a1d1bb31 627 CPUWatchpoint *wp;
6658ffb8 628
ff4700b0 629 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 630 if (addr == wp->vaddr && len == wp->len
6e140f28 631 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 632 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
633 return 0;
634 }
635 }
a1d1bb31 636 return -ENOENT;
6658ffb8
PB
637}
638
a1d1bb31 639/* Remove a specific watchpoint by reference. */
75a34036 640void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 641{
ff4700b0 642 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 643
31b030d4 644 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 645
7267c094 646 g_free(watchpoint);
a1d1bb31
AL
647}
648
649/* Remove all matching watchpoints. */
75a34036 650void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 651{
c0ce998e 652 CPUWatchpoint *wp, *next;
a1d1bb31 653
ff4700b0 654 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
655 if (wp->flags & mask) {
656 cpu_watchpoint_remove_by_ref(cpu, wp);
657 }
c0ce998e 658 }
7d03f82f 659}
05068c0d
PM
660
661/* Return true if this watchpoint address matches the specified
662 * access (ie the address range covered by the watchpoint overlaps
663 * partially or completely with the address range covered by the
664 * access).
665 */
666static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
667 vaddr addr,
668 vaddr len)
669{
670 /* We know the lengths are non-zero, but a little caution is
671 * required to avoid errors in the case where the range ends
672 * exactly at the top of the address space and so addr + len
673 * wraps round to zero.
674 */
675 vaddr wpend = wp->vaddr + wp->len - 1;
676 vaddr addrend = addr + len - 1;
677
678 return !(addr > wpend || wp->vaddr > addrend);
679}
680
c527ee8f 681#endif
7d03f82f 682
a1d1bb31 683/* Add a breakpoint. */
b3310ab3 684int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 685 CPUBreakpoint **breakpoint)
4c3a88a2 686{
1fddef4b 687#if defined(TARGET_HAS_ICE)
c0ce998e 688 CPUBreakpoint *bp;
3b46e624 689
7267c094 690 bp = g_malloc(sizeof(*bp));
4c3a88a2 691
a1d1bb31
AL
692 bp->pc = pc;
693 bp->flags = flags;
694
2dc9f411 695 /* keep all GDB-injected breakpoints in front */
00b941e5 696 if (flags & BP_GDB) {
f0c3c505 697 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 698 } else {
f0c3c505 699 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 700 }
3b46e624 701
f0c3c505 702 breakpoint_invalidate(cpu, pc);
a1d1bb31 703
00b941e5 704 if (breakpoint) {
a1d1bb31 705 *breakpoint = bp;
00b941e5 706 }
4c3a88a2
FB
707 return 0;
708#else
a1d1bb31 709 return -ENOSYS;
4c3a88a2
FB
710#endif
711}
712
a1d1bb31 713/* Remove a specific breakpoint. */
b3310ab3 714int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 715{
7d03f82f 716#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
717 CPUBreakpoint *bp;
718
f0c3c505 719 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 720 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 721 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
722 return 0;
723 }
7d03f82f 724 }
a1d1bb31
AL
725 return -ENOENT;
726#else
727 return -ENOSYS;
7d03f82f
EI
728#endif
729}
730
a1d1bb31 731/* Remove a specific breakpoint by reference. */
b3310ab3 732void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 733{
1fddef4b 734#if defined(TARGET_HAS_ICE)
f0c3c505
AF
735 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
736
737 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 738
7267c094 739 g_free(breakpoint);
a1d1bb31
AL
740#endif
741}
742
743/* Remove all matching breakpoints. */
b3310ab3 744void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
745{
746#if defined(TARGET_HAS_ICE)
c0ce998e 747 CPUBreakpoint *bp, *next;
a1d1bb31 748
f0c3c505 749 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
750 if (bp->flags & mask) {
751 cpu_breakpoint_remove_by_ref(cpu, bp);
752 }
c0ce998e 753 }
4c3a88a2
FB
754#endif
755}
756
c33a346e
FB
757/* enable or disable single step mode. EXCP_DEBUG is returned by the
758 CPU loop after each instruction */
3825b28f 759void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 760{
1fddef4b 761#if defined(TARGET_HAS_ICE)
ed2803da
AF
762 if (cpu->singlestep_enabled != enabled) {
763 cpu->singlestep_enabled = enabled;
764 if (kvm_enabled()) {
38e478ec 765 kvm_update_guest_debug(cpu, 0);
ed2803da 766 } else {
ccbb4d44 767 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 768 /* XXX: only flush what is necessary */
38e478ec 769 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
770 tb_flush(env);
771 }
c33a346e
FB
772 }
773#endif
774}
775
a47dddd7 776void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
777{
778 va_list ap;
493ae1f0 779 va_list ap2;
7501267e
FB
780
781 va_start(ap, fmt);
493ae1f0 782 va_copy(ap2, ap);
7501267e
FB
783 fprintf(stderr, "qemu: fatal: ");
784 vfprintf(stderr, fmt, ap);
785 fprintf(stderr, "\n");
878096ee 786 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
787 if (qemu_log_enabled()) {
788 qemu_log("qemu: fatal: ");
789 qemu_log_vprintf(fmt, ap2);
790 qemu_log("\n");
a0762859 791 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 792 qemu_log_flush();
93fcfe39 793 qemu_log_close();
924edcae 794 }
493ae1f0 795 va_end(ap2);
f9373291 796 va_end(ap);
fd052bf6
RV
797#if defined(CONFIG_USER_ONLY)
798 {
799 struct sigaction act;
800 sigfillset(&act.sa_mask);
801 act.sa_handler = SIG_DFL;
802 sigaction(SIGABRT, &act, NULL);
803 }
804#endif
7501267e
FB
805 abort();
806}
807
0124311e 808#if !defined(CONFIG_USER_ONLY)
041603fe
PB
809static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
810{
811 RAMBlock *block;
812
813 /* The list is protected by the iothread lock here. */
814 block = ram_list.mru_block;
815 if (block && addr - block->offset < block->length) {
816 goto found;
817 }
818 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
819 if (addr - block->offset < block->length) {
820 goto found;
821 }
822 }
823
824 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
825 abort();
826
827found:
828 ram_list.mru_block = block;
829 return block;
830}
831
a2f4d5be 832static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 833{
041603fe 834 ram_addr_t start1;
a2f4d5be
JQ
835 RAMBlock *block;
836 ram_addr_t end;
837
838 end = TARGET_PAGE_ALIGN(start + length);
839 start &= TARGET_PAGE_MASK;
d24981d3 840
041603fe
PB
841 block = qemu_get_ram_block(start);
842 assert(block == qemu_get_ram_block(end - 1));
843 start1 = (uintptr_t)block->host + (start - block->offset);
844 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
845}
846
5579c7f3 847/* Note: start and end must be within the same ram block. */
a2f4d5be 848void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 849 unsigned client)
1ccde1cb 850{
1ccde1cb
FB
851 if (length == 0)
852 return;
ace694cc 853 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 854
d24981d3 855 if (tcg_enabled()) {
a2f4d5be 856 tlb_reset_dirty_range_all(start, length);
5579c7f3 857 }
1ccde1cb
FB
858}
859
981fdf23 860static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
861{
862 in_migration = enable;
74576198
AL
863}
864
bb0e627a 865hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
866 MemoryRegionSection *section,
867 target_ulong vaddr,
868 hwaddr paddr, hwaddr xlat,
869 int prot,
870 target_ulong *address)
e5548617 871{
a8170e5e 872 hwaddr iotlb;
e5548617
BS
873 CPUWatchpoint *wp;
874
cc5bea60 875 if (memory_region_is_ram(section->mr)) {
e5548617
BS
876 /* Normal RAM. */
877 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 878 + xlat;
e5548617 879 if (!section->readonly) {
b41aac4f 880 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 881 } else {
b41aac4f 882 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
883 }
884 } else {
1b3fb98f 885 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 886 iotlb += xlat;
e5548617
BS
887 }
888
889 /* Make accesses to pages with watchpoints go via the
890 watchpoint trap routines. */
ff4700b0 891 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 892 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
893 /* Avoid trapping reads of pages with a write breakpoint. */
894 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 895 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
896 *address |= TLB_MMIO;
897 break;
898 }
899 }
900 }
901
902 return iotlb;
903}
9fa3e853
FB
904#endif /* defined(CONFIG_USER_ONLY) */
905
e2eef170 906#if !defined(CONFIG_USER_ONLY)
8da3ff18 907
c227f099 908static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 909 uint16_t section);
acc9d80b 910static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 911
575ddeb4 912static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037 913
914/*
 915 * Set a custom physical guest memory allocator.
916 * Accelerators with unusual needs may need this. Hopefully, we can
917 * get rid of it eventually.
918 */
575ddeb4 919void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037 920{
921 phys_mem_alloc = alloc;
922}
923
53cb28cb
MA
924static uint16_t phys_section_add(PhysPageMap *map,
925 MemoryRegionSection *section)
5312bd8b 926{
68f3f65b
PB
927 /* The physical section number is ORed with a page-aligned
928 * pointer to produce the iotlb entries. Thus it should
929 * never overflow into the page-aligned value.
930 */
53cb28cb 931 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 932
53cb28cb
MA
933 if (map->sections_nb == map->sections_nb_alloc) {
934 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
935 map->sections = g_renew(MemoryRegionSection, map->sections,
936 map->sections_nb_alloc);
5312bd8b 937 }
53cb28cb 938 map->sections[map->sections_nb] = *section;
dfde4e6e 939 memory_region_ref(section->mr);
53cb28cb 940 return map->sections_nb++;
5312bd8b
AK
941}
942
058bc4b5
PB
943static void phys_section_destroy(MemoryRegion *mr)
944{
dfde4e6e
PB
945 memory_region_unref(mr);
946
058bc4b5
PB
947 if (mr->subpage) {
948 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 949 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
950 g_free(subpage);
951 }
952}
953
6092666e 954static void phys_sections_free(PhysPageMap *map)
5312bd8b 955{
9affd6fc
PB
956 while (map->sections_nb > 0) {
957 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
958 phys_section_destroy(section->mr);
959 }
9affd6fc
PB
960 g_free(map->sections);
961 g_free(map->nodes);
5312bd8b
AK
962}
963
ac1970fb 964static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
965{
966 subpage_t *subpage;
a8170e5e 967 hwaddr base = section->offset_within_address_space
0f0cb164 968 & TARGET_PAGE_MASK;
97115a8d 969 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 970 d->map.nodes, d->map.sections);
0f0cb164
AK
971 MemoryRegionSection subsection = {
972 .offset_within_address_space = base,
052e87b0 973 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 974 };
a8170e5e 975 hwaddr start, end;
0f0cb164 976
f3705d53 977 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 978
f3705d53 979 if (!(existing->mr->subpage)) {
acc9d80b 980 subpage = subpage_init(d->as, base);
3be91e86 981 subsection.address_space = d->as;
0f0cb164 982 subsection.mr = &subpage->iomem;
ac1970fb 983 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 984 phys_section_add(&d->map, &subsection));
0f0cb164 985 } else {
f3705d53 986 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
987 }
988 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 989 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
990 subpage_register(subpage, start, end,
991 phys_section_add(&d->map, section));
0f0cb164
AK
992}
993
994
052e87b0
PB
995static void register_multipage(AddressSpaceDispatch *d,
996 MemoryRegionSection *section)
33417e70 997{
a8170e5e 998 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 999 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
1000 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1001 TARGET_PAGE_BITS));
dd81124b 1002
733d5ef5
PB
1003 assert(num_pages);
1004 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
1005}
1006
ac1970fb 1007static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 1008{
89ae337a 1009 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1010 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1011 MemoryRegionSection now = *section, remain = *section;
052e87b0 1012 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1013
733d5ef5
PB
1014 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1015 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1016 - now.offset_within_address_space;
1017
052e87b0 1018 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1019 register_subpage(d, &now);
733d5ef5 1020 } else {
052e87b0 1021 now.size = int128_zero();
733d5ef5 1022 }
052e87b0
PB
1023 while (int128_ne(remain.size, now.size)) {
1024 remain.size = int128_sub(remain.size, now.size);
1025 remain.offset_within_address_space += int128_get64(now.size);
1026 remain.offset_within_region += int128_get64(now.size);
69b67646 1027 now = remain;
052e87b0 1028 if (int128_lt(remain.size, page_size)) {
733d5ef5 1029 register_subpage(d, &now);
88266249 1030 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1031 now.size = page_size;
ac1970fb 1032 register_subpage(d, &now);
69b67646 1033 } else {
052e87b0 1034 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1035 register_multipage(d, &now);
69b67646 1036 }
0f0cb164
AK
1037 }
1038}
1039
62a2744c
SY
1040void qemu_flush_coalesced_mmio_buffer(void)
1041{
1042 if (kvm_enabled())
1043 kvm_flush_coalesced_mmio_buffer();
1044}
1045
b2a8658e
UD
1046void qemu_mutex_lock_ramlist(void)
1047{
1048 qemu_mutex_lock(&ram_list.mutex);
1049}
1050
1051void qemu_mutex_unlock_ramlist(void)
1052{
1053 qemu_mutex_unlock(&ram_list.mutex);
1054}
1055
e1e84ba0 1056#ifdef __linux__
c902760f
MT
1057
1058#include <sys/vfs.h>
1059
1060#define HUGETLBFS_MAGIC 0x958458f6
1061
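/* Return the block size of the filesystem backing 'path' (the huge page size
 * for a hugetlbfs mount); warn if the path is not on hugetlbfs. */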
fc7a5800 1062static long gethugepagesize(const char *path, Error **errp)
c902760f 1063{
1064 struct statfs fs;
1065 int ret;
1066
1067 do {
9742bf26 1068 ret = statfs(path, &fs);
c902760f
MT
1069 } while (ret != 0 && errno == EINTR);
1070
1071 if (ret != 0) {
fc7a5800
HT
1072 error_setg_errno(errp, errno, "failed to get page size of file %s",
1073 path);
9742bf26 1074 return 0;
c902760f
MT
1075 }
1076
1077 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1078 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1079
1080 return fs.f_bsize;
1081}
1082
04b16653
AW
1083static void *file_ram_alloc(RAMBlock *block,
1084 ram_addr_t memory,
7f56e740
PB
1085 const char *path,
1086 Error **errp)
c902760f
MT
1087{
1088 char *filename;
8ca761f6
PF
1089 char *sanitized_name;
1090 char *c;
557529dd 1091 void *area = NULL;
c902760f 1092 int fd;
557529dd 1093 uint64_t hpagesize;
fc7a5800 1094 Error *local_err = NULL;
c902760f 1095
fc7a5800
HT
1096 hpagesize = gethugepagesize(path, &local_err);
1097 if (local_err) {
1098 error_propagate(errp, local_err);
f9a49dfa 1099 goto error;
c902760f
MT
1100 }
1101
1102 if (memory < hpagesize) {
557529dd
HT
1103 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1104 "or larger than huge page size 0x%" PRIx64,
1105 memory, hpagesize);
1106 goto error;
c902760f
MT
1107 }
1108
1109 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1110 error_setg(errp,
1111 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1112 goto error;
c902760f
MT
1113 }
1114
8ca761f6 1115 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1116 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1117 for (c = sanitized_name; *c != '\0'; c++) {
1118 if (*c == '/')
1119 *c = '_';
1120 }
1121
1122 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1123 sanitized_name);
1124 g_free(sanitized_name);
c902760f
MT
1125
1126 fd = mkstemp(filename);
1127 if (fd < 0) {
7f56e740
PB
1128 error_setg_errno(errp, errno,
1129 "unable to create backing store for hugepages");
e4ada482 1130 g_free(filename);
f9a49dfa 1131 goto error;
c902760f
MT
1132 }
1133 unlink(filename);
e4ada482 1134 g_free(filename);
c902760f
MT
1135
1136 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1137
1138 /*
1139 * ftruncate is not supported by hugetlbfs in older
1140 * hosts, so don't bother bailing out on errors.
1141 * If anything goes wrong with it under other filesystems,
1142 * mmap will fail.
1143 */
7f56e740 1144 if (ftruncate(fd, memory)) {
9742bf26 1145 perror("ftruncate");
7f56e740 1146 }
c902760f 1147
dbcb8981
PB
1148 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1149 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1150 fd, 0);
c902760f 1151 if (area == MAP_FAILED) {
7f56e740
PB
1152 error_setg_errno(errp, errno,
1153 "unable to map backing store for hugepages");
9742bf26 1154 close(fd);
f9a49dfa 1155 goto error;
c902760f 1156 }
ef36fa14
MT
1157
1158 if (mem_prealloc) {
38183310 1159 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1160 }
1161
04b16653 1162 block->fd = fd;
c902760f 1163 return area;
f9a49dfa
MT
1164
1165error:
1166 if (mem_prealloc) {
e4d9df4f 1167 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1168 exit(1);
1169 }
1170 return NULL;
c902760f 1171}
1172#endif
1173
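/* Pick an offset for a new RAM block: scan the existing blocks and return the
 * start of the smallest gap that can hold 'size' bytes, aborting if none. */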
d17b5288 1174static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653 1175{
1176 RAMBlock *block, *next_block;
3e837b2c 1177 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1178
49cd9ac6
SH
1179 assert(size != 0); /* it would hand out same offset multiple times */
1180
a3161038 1181 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1182 return 0;
1183
a3161038 1184 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1185 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1186
1187 end = block->offset + block->length;
1188
a3161038 1189 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1190 if (next_block->offset >= end) {
1191 next = MIN(next, next_block->offset);
1192 }
1193 }
1194 if (next - end >= size && next - end < mingap) {
3e837b2c 1195 offset = end;
04b16653
AW
1196 mingap = next - end;
1197 }
1198 }
3e837b2c
AW
1199
1200 if (offset == RAM_ADDR_MAX) {
1201 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1202 (uint64_t)size);
1203 abort();
1204 }
1205
04b16653
AW
1206 return offset;
1207}
1208
652d7ec2 1209ram_addr_t last_ram_offset(void)
d17b5288
AW
1210{
1211 RAMBlock *block;
1212 ram_addr_t last = 0;
1213
a3161038 1214 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1215 last = MAX(last, block->offset + block->length);
1216
1217 return last;
1218}
1219
ddb97f1d
JB
1220static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1221{
1222 int ret;
ddb97f1d
JB
1223
1224 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1225 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1226 "dump-guest-core", true)) {
ddb97f1d
JB
1227 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1228 if (ret) {
1229 perror("qemu_madvise");
1230 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1231 "but dump_guest_core=off specified\n");
1232 }
1233 }
1234}
1235
20cfe881 1236static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1237{
20cfe881 1238 RAMBlock *block;
84b89d78 1239
a3161038 1240 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1241 if (block->offset == addr) {
20cfe881 1242 return block;
c5705a77
AK
1243 }
1244 }
20cfe881
HT
1245
1246 return NULL;
1247}
1248
1249void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1250{
1251 RAMBlock *new_block = find_ram_block(addr);
1252 RAMBlock *block;
1253
c5705a77
AK
1254 assert(new_block);
1255 assert(!new_block->idstr[0]);
84b89d78 1256
09e5ab63
AL
1257 if (dev) {
1258 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1259 if (id) {
1260 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1261 g_free(id);
84b89d78
CM
1262 }
1263 }
1264 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1265
b2a8658e
UD
1266 /* This assumes the iothread lock is taken here too. */
1267 qemu_mutex_lock_ramlist();
a3161038 1268 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1269 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1270 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1271 new_block->idstr);
1272 abort();
1273 }
1274 }
b2a8658e 1275 qemu_mutex_unlock_ramlist();
c5705a77
AK
1276}
1277
20cfe881
HT
1278void qemu_ram_unset_idstr(ram_addr_t addr)
1279{
1280 RAMBlock *block = find_ram_block(addr);
1281
1282 if (block) {
1283 memset(block->idstr, 0, sizeof(block->idstr));
1284 }
1285}
1286
8490fc78
LC
1287static int memory_try_enable_merging(void *addr, size_t len)
1288{
2ff3de68 1289 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1290 /* disabled by the user */
1291 return 0;
1292 }
1293
1294 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1295}
1296
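/* Insert an already filled-in RAMBlock into the global list: assign it an
 * offset, allocate its host memory if none was provided, keep the list sorted
 * from biggest to smallest block, and extend the dirty-memory bitmaps. */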
ef701d7b 1297static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1298{
e1c57ab8 1299 RAMBlock *block;
2152f5ca 1300 ram_addr_t old_ram_size, new_ram_size;
1301
1302 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1303
b2a8658e
UD
1304 /* This assumes the iothread lock is taken here too. */
1305 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1306 new_block->offset = find_ram_offset(new_block->length);
1307
1308 if (!new_block->host) {
1309 if (xen_enabled()) {
1310 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1311 } else {
1312 new_block->host = phys_mem_alloc(new_block->length);
39228250 1313 if (!new_block->host) {
ef701d7b
HT
1314 error_setg_errno(errp, errno,
1315 "cannot set up guest memory '%s'",
1316 memory_region_name(new_block->mr));
1317 qemu_mutex_unlock_ramlist();
1318 return -1;
39228250 1319 }
e1c57ab8 1320 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1321 }
c902760f 1322 }
94a6b54f 1323
abb26d63
PB
1324 /* Keep the list sorted from biggest to smallest block. */
1325 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1326 if (block->length < new_block->length) {
1327 break;
1328 }
1329 }
1330 if (block) {
1331 QTAILQ_INSERT_BEFORE(block, new_block, next);
1332 } else {
1333 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1334 }
0d6d3c87 1335 ram_list.mru_block = NULL;
94a6b54f 1336
f798b07f 1337 ram_list.version++;
b2a8658e 1338 qemu_mutex_unlock_ramlist();
f798b07f 1339
2152f5ca
JQ
1340 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1341
1342 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1343 int i;
1344 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1345 ram_list.dirty_memory[i] =
1346 bitmap_zero_extend(ram_list.dirty_memory[i],
1347 old_ram_size, new_ram_size);
1348 }
2152f5ca 1349 }
e1c57ab8 1350 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1351
e1c57ab8
PB
1352 qemu_ram_setup_dump(new_block->host, new_block->length);
1353 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1354 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1355
e1c57ab8
PB
1356 if (kvm_enabled()) {
1357 kvm_setup_guest_memory(new_block->host, new_block->length);
1358 }
6f0437e8 1359
94a6b54f
PB
1360 return new_block->offset;
1361}
e9a1ab19 1362
0b183fc8 1363#ifdef __linux__
e1c57ab8 1364ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1365 bool share, const char *mem_path,
7f56e740 1366 Error **errp)
e1c57ab8
PB
1367{
1368 RAMBlock *new_block;
ef701d7b
HT
1369 ram_addr_t addr;
1370 Error *local_err = NULL;
e1c57ab8
PB
1371
1372 if (xen_enabled()) {
7f56e740
PB
1373 error_setg(errp, "-mem-path not supported with Xen");
1374 return -1;
e1c57ab8
PB
1375 }
1376
1377 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1378 /*
1379 * file_ram_alloc() needs to allocate just like
1380 * phys_mem_alloc, but we haven't bothered to provide
1381 * a hook there.
1382 */
7f56e740
PB
1383 error_setg(errp,
1384 "-mem-path not supported with this accelerator");
1385 return -1;
e1c57ab8
PB
1386 }
1387
1388 size = TARGET_PAGE_ALIGN(size);
1389 new_block = g_malloc0(sizeof(*new_block));
1390 new_block->mr = mr;
1391 new_block->length = size;
dbcb8981 1392 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1393 new_block->host = file_ram_alloc(new_block, size,
1394 mem_path, errp);
1395 if (!new_block->host) {
1396 g_free(new_block);
1397 return -1;
1398 }
1399
ef701d7b
HT
1400 addr = ram_block_add(new_block, &local_err);
1401 if (local_err) {
1402 g_free(new_block);
1403 error_propagate(errp, local_err);
1404 return -1;
1405 }
1406 return addr;
e1c57ab8 1407}
0b183fc8 1408#endif
e1c57ab8
PB
1409
1410ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ef701d7b 1411 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1412{
1413 RAMBlock *new_block;
ef701d7b
HT
1414 ram_addr_t addr;
1415 Error *local_err = NULL;
e1c57ab8
PB
1416
1417 size = TARGET_PAGE_ALIGN(size);
1418 new_block = g_malloc0(sizeof(*new_block));
1419 new_block->mr = mr;
1420 new_block->length = size;
1421 new_block->fd = -1;
1422 new_block->host = host;
1423 if (host) {
7bd4f430 1424 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1425 }
ef701d7b
HT
1426 addr = ram_block_add(new_block, &local_err);
1427 if (local_err) {
1428 g_free(new_block);
1429 error_propagate(errp, local_err);
1430 return -1;
1431 }
1432 return addr;
e1c57ab8
PB
1433}
1434
ef701d7b 1435ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1436{
ef701d7b 1437 return qemu_ram_alloc_from_ptr(size, NULL, mr, errp);
6977dfe6
YT
1438}
1439
1f2e98b6
AW
1440void qemu_ram_free_from_ptr(ram_addr_t addr)
1441{
1442 RAMBlock *block;
1443
b2a8658e
UD
1444 /* This assumes the iothread lock is taken here too. */
1445 qemu_mutex_lock_ramlist();
a3161038 1446 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1447 if (addr == block->offset) {
a3161038 1448 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1449 ram_list.mru_block = NULL;
f798b07f 1450 ram_list.version++;
7267c094 1451 g_free(block);
b2a8658e 1452 break;
1f2e98b6
AW
1453 }
1454 }
b2a8658e 1455 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1456}
1457
c227f099 1458void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1459{
04b16653
AW
1460 RAMBlock *block;
1461
b2a8658e
UD
1462 /* This assumes the iothread lock is taken here too. */
1463 qemu_mutex_lock_ramlist();
a3161038 1464 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1465 if (addr == block->offset) {
a3161038 1466 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1467 ram_list.mru_block = NULL;
f798b07f 1468 ram_list.version++;
7bd4f430 1469 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1470 ;
dfeaf2ab
MA
1471 } else if (xen_enabled()) {
1472 xen_invalidate_map_cache_entry(block->host);
089f3f76 1473#ifndef _WIN32
3435f395
MA
1474 } else if (block->fd >= 0) {
1475 munmap(block->host, block->length);
1476 close(block->fd);
089f3f76 1477#endif
04b16653 1478 } else {
dfeaf2ab 1479 qemu_anon_ram_free(block->host, block->length);
04b16653 1480 }
7267c094 1481 g_free(block);
b2a8658e 1482 break;
04b16653
AW
1483 }
1484 }
b2a8658e 1485 qemu_mutex_unlock_ramlist();
04b16653 1486
e9a1ab19
FB
1487}
1488
cd19cfa2
HY
1489#ifndef _WIN32
1490void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1491{
1492 RAMBlock *block;
1493 ram_addr_t offset;
1494 int flags;
1495 void *area, *vaddr;
1496
a3161038 1497 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1498 offset = addr - block->offset;
1499 if (offset < block->length) {
1500 vaddr = block->host + offset;
7bd4f430 1501 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1502 ;
dfeaf2ab
MA
1503 } else if (xen_enabled()) {
1504 abort();
cd19cfa2
HY
1505 } else {
1506 flags = MAP_FIXED;
1507 munmap(vaddr, length);
3435f395 1508 if (block->fd >= 0) {
dbcb8981
PB
1509 flags |= (block->flags & RAM_SHARED ?
1510 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1511 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1512 flags, block->fd, offset);
cd19cfa2 1513 } else {
2eb9fbaa
MA
1514 /*
1515 * Remap needs to match alloc. Accelerators that
1516 * set phys_mem_alloc never remap. If they did,
1517 * we'd need a remap hook here.
1518 */
1519 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1520
cd19cfa2
HY
1521 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1522 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1523 flags, -1, 0);
cd19cfa2
HY
1524 }
1525 if (area != vaddr) {
f15fbc4b
AP
1526 fprintf(stderr, "Could not remap addr: "
1527 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1528 length, addr);
1529 exit(1);
1530 }
8490fc78 1531 memory_try_enable_merging(vaddr, length);
ddb97f1d 1532 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1533 }
1534 return;
1535 }
1536 }
1537}
1538#endif /* !_WIN32 */
1539
a35ba7be
PB
1540int qemu_get_ram_fd(ram_addr_t addr)
1541{
1542 RAMBlock *block = qemu_get_ram_block(addr);
1543
1544 return block->fd;
1545}
1546
3fd74b84
DM
1547void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1548{
1549 RAMBlock *block = qemu_get_ram_block(addr);
1550
1551 return block->host;
1552}
1553
1b5ec234
PB
1554/* Return a host pointer to ram allocated with qemu_ram_alloc.
1555 With the exception of the softmmu code in this file, this should
1556 only be used for local memory (e.g. video ram) that the device owns,
1557 and knows it isn't going to access beyond the end of the block.
1558
1559 It should not be used for general purpose DMA.
1560 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1561 */
1562void *qemu_get_ram_ptr(ram_addr_t addr)
1563{
1564 RAMBlock *block = qemu_get_ram_block(addr);
1565
0d6d3c87
PB
1566 if (xen_enabled()) {
1567 /* We need to check if the requested address is in the RAM
1568 * because we don't want to map the entire memory in QEMU.
1569 * In that case just map until the end of the page.
1570 */
1571 if (block->offset == 0) {
1572 return xen_map_cache(addr, 0, 0);
1573 } else if (block->host == NULL) {
1574 block->host =
1575 xen_map_cache(block->offset, block->length, 1);
1576 }
1577 }
1578 return block->host + (addr - block->offset);
dc828ca1
PB
1579}
1580
38bee5dc
SS
1581/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1582 * but takes a size argument */
cb85f7ab 1583static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1584{
8ab934f9
SS
1585 if (*size == 0) {
1586 return NULL;
1587 }
868bb33f 1588 if (xen_enabled()) {
e41d7c69 1589 return xen_map_cache(addr, *size, 1);
868bb33f 1590 } else {
38bee5dc
SS
1591 RAMBlock *block;
1592
a3161038 1593 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1594 if (addr - block->offset < block->length) {
1595 if (addr - block->offset + *size > block->length)
1596 *size = block->length - addr + block->offset;
1597 return block->host + (addr - block->offset);
1598 }
1599 }
1600
1601 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1602 abort();
38bee5dc
SS
1603 }
1604}
1605
7443b437
PB
1606/* Some of the softmmu routines need to translate from a host pointer
1607 (typically a TLB entry) back to a ram offset. */
1b5ec234 1608MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1609{
94a6b54f
PB
1610 RAMBlock *block;
1611 uint8_t *host = ptr;
1612
868bb33f 1613 if (xen_enabled()) {
e41d7c69 1614 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1615 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1616 }
1617
23887b79
PB
1618 block = ram_list.mru_block;
1619 if (block && block->host && host - block->host < block->length) {
1620 goto found;
1621 }
1622
a3161038 1623 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c 1624 /* This case happens when the block is not mapped. */
1625 if (block->host == NULL) {
1626 continue;
1627 }
f471a17e 1628 if (host - block->host < block->length) {
23887b79 1629 goto found;
f471a17e 1630 }
94a6b54f 1631 }
432d268c 1632
1b5ec234 1633 return NULL;
23887b79
PB
1634
1635found:
1636 *ram_addr = block->offset + (host - block->host);
1b5ec234 1637 return block->mr;
e890261f 1638}
f471a17e 1639
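/* Slow-path write handler for RAM pages that contain translated code:
 * invalidate any TBs for the written range, perform the store, mark the page
 * dirty, and drop back to fast writes once the code has been flushed. */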
a8170e5e 1640static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1641 uint64_t val, unsigned size)
9fa3e853 1642{
52159192 1643 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1644 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1645 }
0e0df1e2
AK
1646 switch (size) {
1647 case 1:
1648 stb_p(qemu_get_ram_ptr(ram_addr), val);
1649 break;
1650 case 2:
1651 stw_p(qemu_get_ram_ptr(ram_addr), val);
1652 break;
1653 case 4:
1654 stl_p(qemu_get_ram_ptr(ram_addr), val);
1655 break;
1656 default:
1657 abort();
3a7d929e 1658 }
6886867e 1659 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1660 /* we remove the notdirty callback only if the code has been
1661 flushed */
a2cd8c85 1662 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1663 CPUArchState *env = current_cpu->env_ptr;
93afeade 1664 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1665 }
9fa3e853
FB
1666}
1667
b018ddf6
PB
1668static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1669 unsigned size, bool is_write)
1670{
1671 return is_write;
1672}
1673
0e0df1e2 1674static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1675 .write = notdirty_mem_write,
b018ddf6 1676 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1677 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1678};
1679
0f459d16 1680/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1681static void check_watchpoint(int offset, int len, int flags)
0f459d16 1682{
93afeade
AF
1683 CPUState *cpu = current_cpu;
1684 CPUArchState *env = cpu->env_ptr;
06d55cc1 1685 target_ulong pc, cs_base;
0f459d16 1686 target_ulong vaddr;
a1d1bb31 1687 CPUWatchpoint *wp;
06d55cc1 1688 int cpu_flags;
0f459d16 1689
ff4700b0 1690 if (cpu->watchpoint_hit) {
06d55cc1 1691 /* We re-entered the check after replacing the TB. Now raise
 1692 * the debug interrupt so that it will trigger after the
1693 * current instruction. */
93afeade 1694 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1695 return;
1696 }
93afeade 1697 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1698 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1699 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1700 && (wp->flags & flags)) {
08225676
PM
1701 if (flags == BP_MEM_READ) {
1702 wp->flags |= BP_WATCHPOINT_HIT_READ;
1703 } else {
1704 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1705 }
1706 wp->hitaddr = vaddr;
ff4700b0
AF
1707 if (!cpu->watchpoint_hit) {
1708 cpu->watchpoint_hit = wp;
239c51a5 1709 tb_check_watchpoint(cpu);
6e140f28 1710 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1711 cpu->exception_index = EXCP_DEBUG;
5638d180 1712 cpu_loop_exit(cpu);
6e140f28
AL
1713 } else {
1714 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1715 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1716 cpu_resume_from_signal(cpu, NULL);
6e140f28 1717 }
06d55cc1 1718 }
6e140f28
AL
1719 } else {
1720 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1721 }
1722 }
1723}
1724
6658ffb8
PB
1725/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1726 so these check for a hit then pass through to the normal out-of-line
1727 phys routines. */
a8170e5e 1728static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1729 unsigned size)
6658ffb8 1730{
05068c0d 1731 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1732 switch (size) {
2c17449b 1733 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1734 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1735 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1736 default: abort();
1737 }
6658ffb8
PB
1738}
1739
a8170e5e 1740static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1741 uint64_t val, unsigned size)
6658ffb8 1742{
05068c0d 1743 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1744 switch (size) {
67364150 1745 case 1:
db3be60d 1746 stb_phys(&address_space_memory, addr, val);
67364150
MF
1747 break;
1748 case 2:
5ce5944d 1749 stw_phys(&address_space_memory, addr, val);
67364150
MF
1750 break;
1751 case 4:
ab1da857 1752 stl_phys(&address_space_memory, addr, val);
67364150 1753 break;
1ec9b909
AK
1754 default: abort();
1755 }
6658ffb8
PB
1756}
1757
1ec9b909
AK
1758static const MemoryRegionOps watch_mem_ops = {
1759 .read = watch_mem_read,
1760 .write = watch_mem_write,
1761 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1762};
6658ffb8 1763
a8170e5e 1764static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1765 unsigned len)
db7b5426 1766{
acc9d80b
JK
1767 subpage_t *subpage = opaque;
1768 uint8_t buf[4];
791af8c8 1769
db7b5426 1770#if defined(DEBUG_SUBPAGE)
016e9d62 1771 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1772 subpage, len, addr);
db7b5426 1773#endif
acc9d80b
JK
1774 address_space_read(subpage->as, addr + subpage->base, buf, len);
1775 switch (len) {
1776 case 1:
1777 return ldub_p(buf);
1778 case 2:
1779 return lduw_p(buf);
1780 case 4:
1781 return ldl_p(buf);
1782 default:
1783 abort();
1784 }
db7b5426
BS
1785}
1786
a8170e5e 1787static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1788 uint64_t value, unsigned len)
db7b5426 1789{
acc9d80b
JK
1790 subpage_t *subpage = opaque;
1791 uint8_t buf[4];
1792
db7b5426 1793#if defined(DEBUG_SUBPAGE)
016e9d62 1794 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1795 " value %"PRIx64"\n",
1796 __func__, subpage, len, addr, value);
db7b5426 1797#endif
acc9d80b
JK
1798 switch (len) {
1799 case 1:
1800 stb_p(buf, value);
1801 break;
1802 case 2:
1803 stw_p(buf, value);
1804 break;
1805 case 4:
1806 stl_p(buf, value);
1807 break;
1808 default:
1809 abort();
1810 }
1811 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1812}
1813
c353e4cc 1814static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1815 unsigned len, bool is_write)
c353e4cc 1816{
acc9d80b 1817 subpage_t *subpage = opaque;
c353e4cc 1818#if defined(DEBUG_SUBPAGE)
016e9d62 1819 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1820 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1821#endif
1822
acc9d80b 1823 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1824 len, is_write);
c353e4cc
PB
1825}
1826
70c68e44
AK
1827static const MemoryRegionOps subpage_ops = {
1828 .read = subpage_read,
1829 .write = subpage_write,
c353e4cc 1830 .valid.accepts = subpage_accepts,
70c68e44 1831 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1832};
1833
c227f099 1834static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1835 uint16_t section)
db7b5426
BS
1836{
1837 int idx, eidx;
1838
1839 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1840 return -1;
1841 idx = SUBPAGE_IDX(start);
1842 eidx = SUBPAGE_IDX(end);
1843#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1844 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1845 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1846#endif
db7b5426 1847 for (; idx <= eidx; idx++) {
5312bd8b 1848 mmio->sub_section[idx] = section;
db7b5426
BS
1849 }
1850
1851 return 0;
1852}
1853
acc9d80b 1854static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1855{
c227f099 1856 subpage_t *mmio;
db7b5426 1857
7267c094 1858 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1859
acc9d80b 1860 mmio->as = as;
1eec614b 1861 mmio->base = base;
2c9b15ca 1862 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1863 NULL, TARGET_PAGE_SIZE);
b3b00c78 1864 mmio->iomem.subpage = true;
db7b5426 1865#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1866 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1867 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1868#endif
b41aac4f 1869 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1870
1871 return mmio;
1872}
1873
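/* Conceptual sketch, separate from the real subpage_t above: a subpage
 * covers one target page whose bytes can belong to different
 * MemoryRegionSections, so it keeps a per-offset table of section ids and
 * forwards each access, much as subpage_read()/subpage_write() do via
 * address_space_read()/address_space_write().  A 4K page size is assumed
 * here purely for illustration. */
typedef struct ExampleSubPage {
    uint16_t section_of[4096];          /* one section id per byte offset */
} ExampleSubPage;

static void example_subpage_register(ExampleSubPage *sp, uint32_t start,
                                     uint32_t end, uint16_t section)
{
    uint32_t off;

    for (off = start; off <= end; off++) {
        sp->section_of[off] = section;  /* mirrors the idx..eidx loop above */
    }
}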
a656e22f
PC
1874static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1875 MemoryRegion *mr)
5312bd8b 1876{
a656e22f 1877 assert(as);
5312bd8b 1878 MemoryRegionSection section = {
a656e22f 1879 .address_space = as,
5312bd8b
AK
1880 .mr = mr,
1881 .offset_within_address_space = 0,
1882 .offset_within_region = 0,
052e87b0 1883 .size = int128_2_64(),
5312bd8b
AK
1884 };
1885
53cb28cb 1886 return phys_section_add(map, &section);
5312bd8b
AK
1887}
1888
77717094 1889MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1890{
77717094 1891 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1892}
1893
e9179ce1
AK
1894static void io_mem_init(void)
1895{
1f6245e5 1896 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1897 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1898 NULL, UINT64_MAX);
2c9b15ca 1899 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1900 NULL, UINT64_MAX);
2c9b15ca 1901 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1902 NULL, UINT64_MAX);
e9179ce1
AK
1903}
1904
ac1970fb 1905static void mem_begin(MemoryListener *listener)
00752703
PB
1906{
1907 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1908 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1909 uint16_t n;
1910
a656e22f 1911 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1912 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1913 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1914 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1915 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1916 assert(n == PHYS_SECTION_ROM);
a656e22f 1917 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1918 assert(n == PHYS_SECTION_WATCH);
00752703 1919
9736e55b 1920 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1921 d->as = as;
1922 as->next_dispatch = d;
1923}
1924
1925static void mem_commit(MemoryListener *listener)
ac1970fb 1926{
89ae337a 1927 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1928 AddressSpaceDispatch *cur = as->dispatch;
1929 AddressSpaceDispatch *next = as->next_dispatch;
1930
53cb28cb 1931 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1932
0475d94f 1933 as->dispatch = next;
b41aac4f 1934
53cb28cb
MA
1935 if (cur) {
1936 phys_sections_free(&cur->map);
1937 g_free(cur);
1938 }
9affd6fc
PB
1939}
1940
1d71148e 1941static void tcg_commit(MemoryListener *listener)
50c1e149 1942{
182735ef 1943 CPUState *cpu;
117712c3
AK
1944
1945 /* since each CPU stores ram addresses in its TLB cache, we must
1946 reset the modified entries */
1947 /* XXX: slow ! */
bdc44640 1948 CPU_FOREACH(cpu) {
33bde2e1
EI
1949 /* FIXME: Disentangle the cpu.h circular files deps so we can
1950 directly get the right CPU from listener. */
1951 if (cpu->tcg_as_listener != listener) {
1952 continue;
1953 }
00c8cb0a 1954 tlb_flush(cpu, 1);
117712c3 1955 }
50c1e149
AK
1956}
1957
93632747
AK
1958static void core_log_global_start(MemoryListener *listener)
1959{
981fdf23 1960 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1961}
1962
1963static void core_log_global_stop(MemoryListener *listener)
1964{
981fdf23 1965 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1966}
1967
93632747 1968static MemoryListener core_memory_listener = {
93632747
AK
1969 .log_global_start = core_log_global_start,
1970 .log_global_stop = core_log_global_stop,
ac1970fb 1971 .priority = 1,
93632747
AK
1972};
1973
ac1970fb
AK
1974void address_space_init_dispatch(AddressSpace *as)
1975{
00752703 1976 as->dispatch = NULL;
89ae337a 1977 as->dispatch_listener = (MemoryListener) {
ac1970fb 1978 .begin = mem_begin,
00752703 1979 .commit = mem_commit,
ac1970fb
AK
1980 .region_add = mem_add,
1981 .region_nop = mem_add,
1982 .priority = 0,
1983 };
89ae337a 1984 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1985}
1986
83f3c251
AK
1987void address_space_destroy_dispatch(AddressSpace *as)
1988{
1989 AddressSpaceDispatch *d = as->dispatch;
1990
89ae337a 1991 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1992 g_free(d);
1993 as->dispatch = NULL;
1994}
1995
62152b8a
AK
1996static void memory_map_init(void)
1997{
7267c094 1998 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1999
57271d63 2000 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2001 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2002
7267c094 2003 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2004 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2005 65536);
7dca8043 2006 address_space_init(&address_space_io, system_io, "I/O");
93632747 2007
f6790af6 2008 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2009}
2010
2011MemoryRegion *get_system_memory(void)
2012{
2013 return system_memory;
2014}
2015
309cb471
AK
2016MemoryRegion *get_system_io(void)
2017{
2018 return system_io;
2019}
2020
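/* Sketch of how a board or device model typically plugs an MMIO region into
 * the system memory returned above.  The callbacks follow the same
 * MemoryRegionOps shape as watch_mem_ops/subpage_ops earlier in this file;
 * the device name, base address and size are made up, and
 * memory_region_add_subregion() is assumed to be the usual mapping API. */
static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0;                           /* a real device would decode addr */
}

static void example_dev_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned size)
{
    /* a real device would latch val into its state here */
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void example_map_device(MemoryRegion *dev_mr)
{
    memory_region_init_io(dev_mr, NULL, &example_dev_ops, NULL,
                          "example-dev", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, dev_mr);
}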
e2eef170
PB
2021#endif /* !defined(CONFIG_USER_ONLY) */
2022
13eb76e0
FB
2023/* physical memory access (slow version, mainly for debug) */
2024#if defined(CONFIG_USER_ONLY)
f17ec444 2025int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2026 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2027{
2028 int l, flags;
2029 target_ulong page;
53a5960a 2030 void * p;
13eb76e0
FB
2031
2032 while (len > 0) {
2033 page = addr & TARGET_PAGE_MASK;
2034 l = (page + TARGET_PAGE_SIZE) - addr;
2035 if (l > len)
2036 l = len;
2037 flags = page_get_flags(page);
2038 if (!(flags & PAGE_VALID))
a68fe89c 2039 return -1;
13eb76e0
FB
2040 if (is_write) {
2041 if (!(flags & PAGE_WRITE))
a68fe89c 2042 return -1;
579a97f7 2043 /* XXX: this code should not depend on lock_user */
72fb7daa 2044 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2045 return -1;
72fb7daa
AJ
2046 memcpy(p, buf, l);
2047 unlock_user(p, addr, l);
13eb76e0
FB
2048 } else {
2049 if (!(flags & PAGE_READ))
a68fe89c 2050 return -1;
579a97f7 2051 /* XXX: this code should not depend on lock_user */
72fb7daa 2052 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2053 return -1;
72fb7daa 2054 memcpy(buf, p, l);
5b257578 2055 unlock_user(p, addr, 0);
13eb76e0
FB
2056 }
2057 len -= l;
2058 buf += l;
2059 addr += l;
2060 }
a68fe89c 2061 return 0;
13eb76e0 2062}
8df1cd07 2063
13eb76e0 2064#else
51d7a9eb 2065
a8170e5e
AK
2066static void invalidate_and_set_dirty(hwaddr addr,
2067 hwaddr length)
51d7a9eb 2068{
f874bf90
PM
2069 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2070 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2071 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2072 }
e226939d 2073 xen_modified_memory(addr, length);
51d7a9eb
AP
2074}
2075
23326164 2076static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2077{
e1622f4b 2078 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2079
2080 /* Regions are assumed to support 1-4 byte accesses unless
2081 otherwise specified. */
23326164
RH
2082 if (access_size_max == 0) {
2083 access_size_max = 4;
2084 }
2085
2086 /* Bound the maximum access by the alignment of the address. */
2087 if (!mr->ops->impl.unaligned) {
2088 unsigned align_size_max = addr & -addr;
2089 if (align_size_max != 0 && align_size_max < access_size_max) {
2090 access_size_max = align_size_max;
2091 }
82f2563f 2092 }
23326164
RH
2093
2094 /* Don't attempt accesses larger than the maximum. */
2095 if (l > access_size_max) {
2096 l = access_size_max;
82f2563f 2097 }
098178f2
PB
2098 if (l & (l - 1)) {
2099 l = 1 << (qemu_fls(l) - 1);
2100 }
23326164
RH
2101
2102 return l;
82f2563f
PB
2103}
2104
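/* Standalone illustration of the clamping done by memory_access_size()
 * above (not the QEMU helper itself): bound the size by the region maximum,
 * then by the natural alignment of the address (addr & -addr isolates the
 * lowest set bit), and finally round a non-power-of-two length down to a
 * power of two, which is what the qemu_fls() expression achieves above. */
static unsigned example_clamp_access_size(unsigned max_access, unsigned l,
                                          hwaddr addr)
{
    unsigned align = addr & -addr;      /* natural alignment of addr */

    if (align != 0 && align < max_access) {
        max_access = align;
    }
    if (l > max_access) {
        l = max_access;
    }
    while (l & (l - 1)) {               /* clear low bits until a power of two */
        l &= l - 1;
    }
    return l;                           /* e.g. (8, 6, 0x1002) -> 2 */
}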
fd8aaa76 2105bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2106 int len, bool is_write)
13eb76e0 2107{
149f54b5 2108 hwaddr l;
13eb76e0 2109 uint8_t *ptr;
791af8c8 2110 uint64_t val;
149f54b5 2111 hwaddr addr1;
5c8a00ce 2112 MemoryRegion *mr;
fd8aaa76 2113 bool error = false;
3b46e624 2114
13eb76e0 2115 while (len > 0) {
149f54b5 2116 l = len;
5c8a00ce 2117 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2118
13eb76e0 2119 if (is_write) {
5c8a00ce
PB
2120 if (!memory_access_is_direct(mr, is_write)) {
2121 l = memory_access_size(mr, l, addr1);
4917cf44 2122 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2123 potential bugs */
23326164
RH
2124 switch (l) {
2125 case 8:
2126 /* 64 bit write access */
2127 val = ldq_p(buf);
2128 error |= io_mem_write(mr, addr1, val, 8);
2129 break;
2130 case 4:
1c213d19 2131 /* 32 bit write access */
c27004ec 2132 val = ldl_p(buf);
5c8a00ce 2133 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2134 break;
2135 case 2:
1c213d19 2136 /* 16 bit write access */
c27004ec 2137 val = lduw_p(buf);
5c8a00ce 2138 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2139 break;
2140 case 1:
1c213d19 2141 /* 8 bit write access */
c27004ec 2142 val = ldub_p(buf);
5c8a00ce 2143 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2144 break;
2145 default:
2146 abort();
13eb76e0 2147 }
2bbfa05d 2148 } else {
5c8a00ce 2149 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2150 /* RAM case */
5579c7f3 2151 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2152 memcpy(ptr, buf, l);
51d7a9eb 2153 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2154 }
2155 } else {
5c8a00ce 2156 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2157 /* I/O case */
5c8a00ce 2158 l = memory_access_size(mr, l, addr1);
23326164
RH
2159 switch (l) {
2160 case 8:
2161 /* 64 bit read access */
2162 error |= io_mem_read(mr, addr1, &val, 8);
2163 stq_p(buf, val);
2164 break;
2165 case 4:
13eb76e0 2166 /* 32 bit read access */
5c8a00ce 2167 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2168 stl_p(buf, val);
23326164
RH
2169 break;
2170 case 2:
13eb76e0 2171 /* 16 bit read access */
5c8a00ce 2172 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2173 stw_p(buf, val);
23326164
RH
2174 break;
2175 case 1:
1c213d19 2176 /* 8 bit read access */
5c8a00ce 2177 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2178 stb_p(buf, val);
23326164
RH
2179 break;
2180 default:
2181 abort();
13eb76e0
FB
2182 }
2183 } else {
2184 /* RAM case */
5c8a00ce 2185 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2186 memcpy(buf, ptr, l);
13eb76e0
FB
2187 }
2188 }
2189 len -= l;
2190 buf += l;
2191 addr += l;
2192 }
fd8aaa76
PB
2193
2194 return error;
13eb76e0 2195}
8df1cd07 2196
fd8aaa76 2197bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2198 const uint8_t *buf, int len)
2199{
fd8aaa76 2200 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2201}
2202
fd8aaa76 2203bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2204{
fd8aaa76 2205 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2206}
2207
2208
a8170e5e 2209void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2210 int len, int is_write)
2211{
fd8aaa76 2212 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2213}
2214
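/* Usage sketch for the wrapper above (the guest physical address is
 * illustrative): write a few bytes into guest memory and read them back.
 * RAM is handled with a plain memcpy, MMIO goes through io_mem_read() /
 * io_mem_write(), exactly as in address_space_rw() above. */
static void example_phys_rw(void)
{
    uint8_t out[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint8_t in[4];

    cpu_physical_memory_rw(0x1000, out, sizeof(out), 1);   /* is_write = 1 */
    cpu_physical_memory_rw(0x1000, in, sizeof(in), 0);     /* is_write = 0 */
}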
582b55a9
AG
2215enum write_rom_type {
2216 WRITE_DATA,
2217 FLUSH_CACHE,
2218};
2219
2a221651 2220static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2221 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2222{
149f54b5 2223 hwaddr l;
d0ecd2aa 2224 uint8_t *ptr;
149f54b5 2225 hwaddr addr1;
5c8a00ce 2226 MemoryRegion *mr;
3b46e624 2227
d0ecd2aa 2228 while (len > 0) {
149f54b5 2229 l = len;
2a221651 2230 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2231
5c8a00ce
PB
2232 if (!(memory_region_is_ram(mr) ||
2233 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2234 /* do nothing */
2235 } else {
5c8a00ce 2236 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2237 /* ROM/RAM case */
5579c7f3 2238 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2239 switch (type) {
2240 case WRITE_DATA:
2241 memcpy(ptr, buf, l);
2242 invalidate_and_set_dirty(addr1, l);
2243 break;
2244 case FLUSH_CACHE:
2245 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2246 break;
2247 }
d0ecd2aa
FB
2248 }
2249 len -= l;
2250 buf += l;
2251 addr += l;
2252 }
2253}
2254
582b55a9 2255/* used for ROM loading : can write in RAM and ROM */
2a221651 2256void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2257 const uint8_t *buf, int len)
2258{
2a221651 2259 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2260}
2261
2262void cpu_flush_icache_range(hwaddr start, int len)
2263{
2264 /*
2265 * This function should do the same thing as an icache flush that was
2266 * triggered from within the guest. For TCG we are always cache coherent,
2267 * so there is no need to flush anything. For KVM / Xen we need to flush
2268 * the host's instruction cache at least.
2269 */
2270 if (tcg_enabled()) {
2271 return;
2272 }
2273
2a221651
EI
2274 cpu_physical_memory_write_rom_internal(&address_space_memory,
2275 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2276}
2277
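/* Sketch of a typical firmware-load pattern built on the two helpers above
 * (destination address and size come from the caller): copy the blob even
 * into ROM/ROM-device regions, then make the host instruction cache
 * coherent for the KVM/Xen case. */
static void example_load_blob(hwaddr dest, const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, dest, blob, size);
    cpu_flush_icache_range(dest, size);
}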
6d16c2f8 2278typedef struct {
d3e71559 2279 MemoryRegion *mr;
6d16c2f8 2280 void *buffer;
a8170e5e
AK
2281 hwaddr addr;
2282 hwaddr len;
6d16c2f8
AL
2283} BounceBuffer;
2284
2285static BounceBuffer bounce;
2286
ba223c29
AL
2287typedef struct MapClient {
2288 void *opaque;
2289 void (*callback)(void *opaque);
72cf2d4f 2290 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2291} MapClient;
2292
72cf2d4f
BS
2293static QLIST_HEAD(map_client_list, MapClient) map_client_list
2294 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2295
2296void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2297{
7267c094 2298 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2299
2300 client->opaque = opaque;
2301 client->callback = callback;
72cf2d4f 2302 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2303 return client;
2304}
2305
8b9c99d9 2306static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2307{
2308 MapClient *client = (MapClient *)_client;
2309
72cf2d4f 2310 QLIST_REMOVE(client, link);
7267c094 2311 g_free(client);
ba223c29
AL
2312}
2313
2314static void cpu_notify_map_clients(void)
2315{
2316 MapClient *client;
2317
72cf2d4f
BS
2318 while (!QLIST_EMPTY(&map_client_list)) {
2319 client = QLIST_FIRST(&map_client_list);
ba223c29 2320 client->callback(client->opaque);
34d5e948 2321 cpu_unregister_map_client(client);
ba223c29
AL
2322 }
2323}
2324
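/* Sketch of the intended use of the map-client list above: when
 * address_space_map() further down returns NULL because the single bounce
 * buffer is busy, a caller can register a callback (retry_dma() here is
 * hypothetical) and re-issue its transfer once the buffer is released. */
static void retry_dma(void *opaque)
{
    /* 'opaque' would carry whatever state is needed to retry the transfer */
}

static void example_wait_for_bounce_buffer(void *dma_state)
{
    cpu_register_map_client(dma_state, retry_dma);
}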
51644ab7
PB
2325bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2326{
5c8a00ce 2327 MemoryRegion *mr;
51644ab7
PB
2328 hwaddr l, xlat;
2329
2330 while (len > 0) {
2331 l = len;
5c8a00ce
PB
2332 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2333 if (!memory_access_is_direct(mr, is_write)) {
2334 l = memory_access_size(mr, l, addr);
2335 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2336 return false;
2337 }
2338 }
2339
2340 len -= l;
2341 addr += l;
2342 }
2343 return true;
2344}
2345
6d16c2f8
AL
2346/* Map a physical memory region into a host virtual address.
2347 * May map a subset of the requested range, given by and returned in *plen.
2348 * May return NULL if resources needed to perform the mapping are exhausted.
2349 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2350 * Use cpu_register_map_client() to know when retrying the map operation is
2351 * likely to succeed.
6d16c2f8 2352 */
ac1970fb 2353void *address_space_map(AddressSpace *as,
a8170e5e
AK
2354 hwaddr addr,
2355 hwaddr *plen,
ac1970fb 2356 bool is_write)
6d16c2f8 2357{
a8170e5e 2358 hwaddr len = *plen;
e3127ae0
PB
2359 hwaddr done = 0;
2360 hwaddr l, xlat, base;
2361 MemoryRegion *mr, *this_mr;
2362 ram_addr_t raddr;
6d16c2f8 2363
e3127ae0
PB
2364 if (len == 0) {
2365 return NULL;
2366 }
38bee5dc 2367
e3127ae0
PB
2368 l = len;
2369 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2370 if (!memory_access_is_direct(mr, is_write)) {
2371 if (bounce.buffer) {
2372 return NULL;
6d16c2f8 2373 }
e85d9db5
KW
2374 /* Avoid unbounded allocations */
2375 l = MIN(l, TARGET_PAGE_SIZE);
2376 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2377 bounce.addr = addr;
2378 bounce.len = l;
d3e71559
PB
2379
2380 memory_region_ref(mr);
2381 bounce.mr = mr;
e3127ae0
PB
2382 if (!is_write) {
2383 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2384 }
6d16c2f8 2385
e3127ae0
PB
2386 *plen = l;
2387 return bounce.buffer;
2388 }
2389
2390 base = xlat;
2391 raddr = memory_region_get_ram_addr(mr);
2392
2393 for (;;) {
6d16c2f8
AL
2394 len -= l;
2395 addr += l;
e3127ae0
PB
2396 done += l;
2397 if (len == 0) {
2398 break;
2399 }
2400
2401 l = len;
2402 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2403 if (this_mr != mr || xlat != base + done) {
2404 break;
2405 }
6d16c2f8 2406 }
e3127ae0 2407
d3e71559 2408 memory_region_ref(mr);
e3127ae0
PB
2409 *plen = done;
2410 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2411}
2412
ac1970fb 2413/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2414 * Will also mark the memory as dirty if is_write == 1. access_len gives
2415 * the amount of memory that was actually read or written by the caller.
2416 */
a8170e5e
AK
2417void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2418 int is_write, hwaddr access_len)
6d16c2f8
AL
2419{
2420 if (buffer != bounce.buffer) {
d3e71559
PB
2421 MemoryRegion *mr;
2422 ram_addr_t addr1;
2423
2424 mr = qemu_ram_addr_from_host(buffer, &addr1);
2425 assert(mr != NULL);
6d16c2f8 2426 if (is_write) {
6886867e 2427 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2428 }
868bb33f 2429 if (xen_enabled()) {
e41d7c69 2430 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2431 }
d3e71559 2432 memory_region_unref(mr);
6d16c2f8
AL
2433 return;
2434 }
2435 if (is_write) {
ac1970fb 2436 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2437 }
f8a83245 2438 qemu_vfree(bounce.buffer);
6d16c2f8 2439 bounce.buffer = NULL;
d3e71559 2440 memory_region_unref(bounce.mr);
ba223c29 2441 cpu_notify_map_clients();
6d16c2f8 2442}
d0ecd2aa 2443
a8170e5e
AK
2444void *cpu_physical_memory_map(hwaddr addr,
2445 hwaddr *plen,
ac1970fb
AK
2446 int is_write)
2447{
2448 return address_space_map(&address_space_memory, addr, plen, is_write);
2449}
2450
a8170e5e
AK
2451void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2452 int is_write, hwaddr access_len)
ac1970fb
AK
2453{
2454 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2455}
2456
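/* Usage sketch of the map/unmap pair above (address and length are supplied
 * by the caller): map a window for writing, fill as much as was actually
 * mapped, then unmap with the byte count that was written so the dirty
 * bitmap and TB invalidation in address_space_unmap() see the right range. */
static void example_dma_write(AddressSpace *as, hwaddr addr, hwaddr len,
                              const uint8_t *data)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (!host) {
        return;     /* resources exhausted; see cpu_register_map_client() */
    }
    memcpy(host, data, plen);           /* plen may be smaller than len */
    address_space_unmap(as, host, plen, true, plen);
}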
8df1cd07 2457/* warning: addr must be aligned */
fdfba1a2 2458static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2459 enum device_endian endian)
8df1cd07 2460{
8df1cd07 2461 uint8_t *ptr;
791af8c8 2462 uint64_t val;
5c8a00ce 2463 MemoryRegion *mr;
149f54b5
PB
2464 hwaddr l = 4;
2465 hwaddr addr1;
8df1cd07 2466
fdfba1a2 2467 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2468 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2469 /* I/O case */
5c8a00ce 2470 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2471#if defined(TARGET_WORDS_BIGENDIAN)
2472 if (endian == DEVICE_LITTLE_ENDIAN) {
2473 val = bswap32(val);
2474 }
2475#else
2476 if (endian == DEVICE_BIG_ENDIAN) {
2477 val = bswap32(val);
2478 }
2479#endif
8df1cd07
FB
2480 } else {
2481 /* RAM case */
5c8a00ce 2482 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2483 & TARGET_PAGE_MASK)
149f54b5 2484 + addr1);
1e78bcc1
AG
2485 switch (endian) {
2486 case DEVICE_LITTLE_ENDIAN:
2487 val = ldl_le_p(ptr);
2488 break;
2489 case DEVICE_BIG_ENDIAN:
2490 val = ldl_be_p(ptr);
2491 break;
2492 default:
2493 val = ldl_p(ptr);
2494 break;
2495 }
8df1cd07
FB
2496 }
2497 return val;
2498}
2499
fdfba1a2 2500uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2501{
fdfba1a2 2502 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2503}
2504
fdfba1a2 2505uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2506{
fdfba1a2 2507 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2508}
2509
fdfba1a2 2510uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2511{
fdfba1a2 2512 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2513}
2514
84b7b8e7 2515/* warning: addr must be aligned */
2c17449b 2516static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2517 enum device_endian endian)
84b7b8e7 2518{
84b7b8e7
FB
2519 uint8_t *ptr;
2520 uint64_t val;
5c8a00ce 2521 MemoryRegion *mr;
149f54b5
PB
2522 hwaddr l = 8;
2523 hwaddr addr1;
84b7b8e7 2524
2c17449b 2525 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2526 false);
2527 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2528 /* I/O case */
5c8a00ce 2529 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2530#if defined(TARGET_WORDS_BIGENDIAN)
2531 if (endian == DEVICE_LITTLE_ENDIAN) {
2532 val = bswap64(val);
2533 }
2534#else
2535 if (endian == DEVICE_BIG_ENDIAN) {
2536 val = bswap64(val);
2537 }
84b7b8e7
FB
2538#endif
2539 } else {
2540 /* RAM case */
5c8a00ce 2541 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2542 & TARGET_PAGE_MASK)
149f54b5 2543 + addr1);
1e78bcc1
AG
2544 switch (endian) {
2545 case DEVICE_LITTLE_ENDIAN:
2546 val = ldq_le_p(ptr);
2547 break;
2548 case DEVICE_BIG_ENDIAN:
2549 val = ldq_be_p(ptr);
2550 break;
2551 default:
2552 val = ldq_p(ptr);
2553 break;
2554 }
84b7b8e7
FB
2555 }
2556 return val;
2557}
2558
2c17449b 2559uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2560{
2c17449b 2561 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2562}
2563
2c17449b 2564uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2565{
2c17449b 2566 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2567}
2568
2c17449b 2569uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2570{
2c17449b 2571 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2572}
2573
aab33094 2574/* XXX: optimize */
2c17449b 2575uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2576{
2577 uint8_t val;
2c17449b 2578 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2579 return val;
2580}
2581
733f0b02 2582/* warning: addr must be aligned */
41701aa4 2583static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2584 enum device_endian endian)
aab33094 2585{
733f0b02
MT
2586 uint8_t *ptr;
2587 uint64_t val;
5c8a00ce 2588 MemoryRegion *mr;
149f54b5
PB
2589 hwaddr l = 2;
2590 hwaddr addr1;
733f0b02 2591
41701aa4 2592 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2593 false);
2594 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2595 /* I/O case */
5c8a00ce 2596 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2597#if defined(TARGET_WORDS_BIGENDIAN)
2598 if (endian == DEVICE_LITTLE_ENDIAN) {
2599 val = bswap16(val);
2600 }
2601#else
2602 if (endian == DEVICE_BIG_ENDIAN) {
2603 val = bswap16(val);
2604 }
2605#endif
733f0b02
MT
2606 } else {
2607 /* RAM case */
5c8a00ce 2608 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2609 & TARGET_PAGE_MASK)
149f54b5 2610 + addr1);
1e78bcc1
AG
2611 switch (endian) {
2612 case DEVICE_LITTLE_ENDIAN:
2613 val = lduw_le_p(ptr);
2614 break;
2615 case DEVICE_BIG_ENDIAN:
2616 val = lduw_be_p(ptr);
2617 break;
2618 default:
2619 val = lduw_p(ptr);
2620 break;
2621 }
733f0b02
MT
2622 }
2623 return val;
aab33094
FB
2624}
2625
41701aa4 2626uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2627{
41701aa4 2628 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2629}
2630
41701aa4 2631uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2632{
41701aa4 2633 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2634}
2635
41701aa4 2636uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2637{
41701aa4 2638 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2639}
2640
8df1cd07
FB
 2641/* warning: addr must be aligned. The ram page is not marked as dirty
2642 and the code inside is not invalidated. It is useful if the dirty
2643 bits are used to track modified PTEs */
2198a121 2644void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2645{
8df1cd07 2646 uint8_t *ptr;
5c8a00ce 2647 MemoryRegion *mr;
149f54b5
PB
2648 hwaddr l = 4;
2649 hwaddr addr1;
8df1cd07 2650
2198a121 2651 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2652 true);
2653 if (l < 4 || !memory_access_is_direct(mr, true)) {
2654 io_mem_write(mr, addr1, val, 4);
8df1cd07 2655 } else {
5c8a00ce 2656 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2657 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2658 stl_p(ptr, val);
74576198
AL
2659
2660 if (unlikely(in_migration)) {
a2cd8c85 2661 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2662 /* invalidate code */
2663 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2664 /* set dirty bit */
6886867e 2665 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2666 }
2667 }
8df1cd07
FB
2668 }
2669}
2670
2671/* warning: addr must be aligned */
ab1da857
EI
2672static inline void stl_phys_internal(AddressSpace *as,
2673 hwaddr addr, uint32_t val,
1e78bcc1 2674 enum device_endian endian)
8df1cd07 2675{
8df1cd07 2676 uint8_t *ptr;
5c8a00ce 2677 MemoryRegion *mr;
149f54b5
PB
2678 hwaddr l = 4;
2679 hwaddr addr1;
8df1cd07 2680
ab1da857 2681 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2682 true);
2683 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2684#if defined(TARGET_WORDS_BIGENDIAN)
2685 if (endian == DEVICE_LITTLE_ENDIAN) {
2686 val = bswap32(val);
2687 }
2688#else
2689 if (endian == DEVICE_BIG_ENDIAN) {
2690 val = bswap32(val);
2691 }
2692#endif
5c8a00ce 2693 io_mem_write(mr, addr1, val, 4);
8df1cd07 2694 } else {
8df1cd07 2695 /* RAM case */
5c8a00ce 2696 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2697 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2698 switch (endian) {
2699 case DEVICE_LITTLE_ENDIAN:
2700 stl_le_p(ptr, val);
2701 break;
2702 case DEVICE_BIG_ENDIAN:
2703 stl_be_p(ptr, val);
2704 break;
2705 default:
2706 stl_p(ptr, val);
2707 break;
2708 }
51d7a9eb 2709 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2710 }
2711}
2712
ab1da857 2713void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2714{
ab1da857 2715 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2716}
2717
ab1da857 2718void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2719{
ab1da857 2720 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2721}
2722
ab1da857 2723void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2724{
ab1da857 2725 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2726}
2727
aab33094 2728/* XXX: optimize */
db3be60d 2729void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2730{
2731 uint8_t v = val;
db3be60d 2732 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2733}
2734
733f0b02 2735/* warning: addr must be aligned */
5ce5944d
EI
2736static inline void stw_phys_internal(AddressSpace *as,
2737 hwaddr addr, uint32_t val,
1e78bcc1 2738 enum device_endian endian)
aab33094 2739{
733f0b02 2740 uint8_t *ptr;
5c8a00ce 2741 MemoryRegion *mr;
149f54b5
PB
2742 hwaddr l = 2;
2743 hwaddr addr1;
733f0b02 2744
5ce5944d 2745 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2746 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2747#if defined(TARGET_WORDS_BIGENDIAN)
2748 if (endian == DEVICE_LITTLE_ENDIAN) {
2749 val = bswap16(val);
2750 }
2751#else
2752 if (endian == DEVICE_BIG_ENDIAN) {
2753 val = bswap16(val);
2754 }
2755#endif
5c8a00ce 2756 io_mem_write(mr, addr1, val, 2);
733f0b02 2757 } else {
733f0b02 2758 /* RAM case */
5c8a00ce 2759 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2760 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2761 switch (endian) {
2762 case DEVICE_LITTLE_ENDIAN:
2763 stw_le_p(ptr, val);
2764 break;
2765 case DEVICE_BIG_ENDIAN:
2766 stw_be_p(ptr, val);
2767 break;
2768 default:
2769 stw_p(ptr, val);
2770 break;
2771 }
51d7a9eb 2772 invalidate_and_set_dirty(addr1, 2);
733f0b02 2773 }
aab33094
FB
2774}
2775
5ce5944d 2776void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2777{
5ce5944d 2778 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2779}
2780
5ce5944d 2781void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2782{
5ce5944d 2783 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2784}
2785
5ce5944d 2786void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2787{
5ce5944d 2788 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2789}
2790
aab33094 2791/* XXX: optimize */
f606604f 2792void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2793{
2794 val = tswap64(val);
f606604f 2795 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2796}
2797
f606604f 2798void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2799{
2800 val = cpu_to_le64(val);
f606604f 2801 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2802}
2803
f606604f 2804void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2805{
2806 val = cpu_to_be64(val);
f606604f 2807 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2808}
2809
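/* Sketch of how the endian-explicit helpers above are meant to be paired
 * (the descriptor address is the caller's): a field defined as
 * little-endian in a device ABI is stored with stl_le_phys() and must be
 * loaded with ldl_le_phys(); the helpers do any byte swapping, so the
 * result is the same regardless of TARGET_WORDS_BIGENDIAN. */
static uint32_t example_le_desc_field(AddressSpace *as, hwaddr desc)
{
    stl_le_phys(as, desc, 0xcafef00d);
    return ldl_le_phys(as, desc);       /* 0xcafef00d on every target */
}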
5e2972fd 2810/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2811int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2812 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2813{
2814 int l;
a8170e5e 2815 hwaddr phys_addr;
9b3c35e0 2816 target_ulong page;
13eb76e0
FB
2817
2818 while (len > 0) {
2819 page = addr & TARGET_PAGE_MASK;
f17ec444 2820 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2821 /* if no physical page mapped, return an error */
2822 if (phys_addr == -1)
2823 return -1;
2824 l = (page + TARGET_PAGE_SIZE) - addr;
2825 if (l > len)
2826 l = len;
5e2972fd 2827 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2828 if (is_write) {
2829 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2830 } else {
2831 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2832 }
13eb76e0
FB
2833 len -= l;
2834 buf += l;
2835 addr += l;
2836 }
2837 return 0;
2838}
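/* Sketch of how a debugger front end (e.g. a gdbstub handler) might use the
 * helper above to read guest virtual memory; 'cpu' and 'vaddr' are assumed
 * to come from the debug session. */
static int example_debug_peek(CPUState *cpu, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, out, len, 0 /* read */);
}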
a68fe89c 2839#endif
13eb76e0 2840
8e4a424b
BS
2841/*
2842 * A helper function for the _utterly broken_ virtio device model to find out if
2843 * it's running on a big endian machine. Don't do this at home kids!
2844 */
98ed8ecf
GK
2845bool target_words_bigendian(void);
2846bool target_words_bigendian(void)
8e4a424b
BS
2847{
2848#if defined(TARGET_WORDS_BIGENDIAN)
2849 return true;
2850#else
2851 return false;
2852#endif
2853}
2854
76f35538 2855#ifndef CONFIG_USER_ONLY
a8170e5e 2856bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2857{
5c8a00ce 2858 MemoryRegion*mr;
149f54b5 2859 hwaddr l = 1;
76f35538 2860
5c8a00ce
PB
2861 mr = address_space_translate(&address_space_memory,
2862 phys_addr, &phys_addr, &l, false);
76f35538 2863
5c8a00ce
PB
2864 return !(memory_region_is_ram(mr) ||
2865 memory_region_is_romd(mr));
76f35538 2866}
bd2fa51f
MH
2867
2868void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2869{
2870 RAMBlock *block;
2871
2872 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2873 func(block->host, block->offset, block->length, opaque);
2874 }
2875}
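/* Sketch of an iterator callback for qemu_ram_foreach_block() above.  The
 * parameter order (host pointer, guest ram offset, length, opaque) mirrors
 * the call made in the loop; the exact RAMBlockIterFunc typedef is assumed
 * to match.  Example use:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;                   /* accumulate the size of each block */
}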
ec3f8c99 2876#endif