[qemu.git] / exec.c (blame)
Commit: exec: make iotlb RCU-friendly
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c 22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9 34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a 40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f 54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6 66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430 72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981 75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
62be4e3a 78/* Only a portion of RAM (used_length) is actually used, and migrated.
79 * This used_length size can change across reboots.
80 */
81#define RAM_RESIZEABLE (1 << 2)
82
e2eef170 83#endif
9fa3e853 84
bdc44640 85struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601 86/* current CPU in the current thread. It is only valid inside
87 cpu_exec() */
4917cf44 88DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 89/* 0 = Do not count executed instructions.
bf20dc07 90 1 = Precise instruction counting.
2e70f6ef 91 2 = Adaptive rate instruction counting. */
5708fc66 92int use_icount;
6a00d601 93
e2eef170 94#if !defined(CONFIG_USER_ONLY)
4346ae3e 95
1db8abb1 96typedef struct PhysPageEntry PhysPageEntry;
97
98struct PhysPageEntry {
9736e55b 99 /* How many bits to skip to the next level (in units of P_L2_SIZE). 0 for a leaf. */
8b795765 100 uint32_t skip : 6;
9736e55b 101 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 102 uint32_t ptr : 26;
1db8abb1 103};
104
8b795765 105#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
106
03f49957 107/* Size of the L2 (and L3, etc) page tables. */
57271d63 108#define ADDR_SPACE_BITS 64
03f49957 109
026736ce 110#define P_L2_BITS 9
03f49957 111#define P_L2_SIZE (1 << P_L2_BITS)
112
113#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
114
115typedef PhysPageEntry Node[P_L2_SIZE];
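
/* Editor's illustrative sketch, not part of exec.c: how a physical address is
 * decomposed for the radix table defined above.  Assuming TARGET_PAGE_BITS is
 * 12, each level consumes P_L2_BITS (9) bits of the page-frame number; the
 * helper name below is hypothetical and simply mirrors the index arithmetic
 * used by phys_page_set_level() and phys_page_find() further down.
 */
static inline unsigned example_l2_index(hwaddr addr, int level)
{
    hwaddr index = addr >> TARGET_PAGE_BITS;   /* page-frame number */

    return (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}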
0475d94f 116
53cb28cb 117typedef struct PhysPageMap {
118 unsigned sections_nb;
119 unsigned sections_nb_alloc;
120 unsigned nodes_nb;
121 unsigned nodes_nb_alloc;
122 Node *nodes;
123 MemoryRegionSection *sections;
124} PhysPageMap;
125
1db8abb1 126struct AddressSpaceDispatch {
127 /* This is a multi-level map on the physical address space.
128 * The bottom level has pointers to MemoryRegionSections.
129 */
130 PhysPageEntry phys_map;
53cb28cb 131 PhysPageMap map;
acc9d80b 132 AddressSpace *as;
1db8abb1 133};
134
90260c6c 135#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
136typedef struct subpage_t {
137 MemoryRegion iomem;
acc9d80b 138 AddressSpace *as;
90260c6c 139 hwaddr base;
140 uint16_t sub_section[TARGET_PAGE_SIZE];
141} subpage_t;
142
b41aac4f 143#define PHYS_SECTION_UNASSIGNED 0
144#define PHYS_SECTION_NOTDIRTY 1
145#define PHYS_SECTION_ROM 2
146#define PHYS_SECTION_WATCH 3
5312bd8b 147
e2eef170 148static void io_mem_init(void);
62152b8a 149static void memory_map_init(void);
09daed84 150static void tcg_commit(MemoryListener *listener);
e2eef170 151
1ec9b909 152static MemoryRegion io_mem_watch;
6658ffb8 153#endif
fd6ce8f6 154
6d9a1304 155#if !defined(CONFIG_USER_ONLY)
d6f2ea22 156
53cb28cb 157static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 158{
53cb28cb 159 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
160 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
161 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
162 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 163 }
f7bf5461 164}
165
53cb28cb 166static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461 167{
168 unsigned i;
8b795765 169 uint32_t ret;
f7bf5461 170
53cb28cb 171 ret = map->nodes_nb++;
f7bf5461 172 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 173 assert(ret != map->nodes_nb_alloc);
03f49957 174 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb 175 map->nodes[ret][i].skip = 1;
176 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 177 }
f7bf5461 178 return ret;
d6f2ea22 179}
180
53cb28cb 181static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
182 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 183 int level)
f7bf5461 184{
185 PhysPageEntry *p;
186 int i;
03f49957 187 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 188
9736e55b 189 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb 190 lp->ptr = phys_map_node_alloc(map);
191 p = map->nodes[lp->ptr];
f7bf5461 192 if (level == 0) {
03f49957 193 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 194 p[i].skip = 0;
b41aac4f 195 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 196 }
67c4d23c 197 }
f7bf5461 198 } else {
53cb28cb 199 p = map->nodes[lp->ptr];
92e873b9 200 }
03f49957 201 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 202
03f49957 203 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 204 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 205 lp->skip = 0;
c19e8800 206 lp->ptr = leaf;
07f07b31 207 *index += step;
208 *nb -= step;
2999097b 209 } else {
53cb28cb 210 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b 211 }
212 ++lp;
f7bf5461 213 }
214}
215
ac1970fb 216static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 217 hwaddr index, hwaddr nb,
2999097b 218 uint16_t leaf)
f7bf5461 219{
2999097b 220 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 221 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 222
53cb28cb 223 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 224}
225
b35ba30f 226/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
227 * and update our entry so we can skip it and go directly to the destination.
228 */
229static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
230{
231 unsigned valid_ptr = P_L2_SIZE;
232 int valid = 0;
233 PhysPageEntry *p;
234 int i;
235
236 if (lp->ptr == PHYS_MAP_NODE_NIL) {
237 return;
238 }
239
240 p = nodes[lp->ptr];
241 for (i = 0; i < P_L2_SIZE; i++) {
242 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
243 continue;
244 }
245
246 valid_ptr = i;
247 valid++;
248 if (p[i].skip) {
249 phys_page_compact(&p[i], nodes, compacted);
250 }
251 }
252
253 /* We can only compress if there's only one child. */
254 if (valid != 1) {
255 return;
256 }
257
258 assert(valid_ptr < P_L2_SIZE);
259
260 /* Don't compress if it won't fit in the # of bits we have. */
261 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
262 return;
263 }
264
265 lp->ptr = p[valid_ptr].ptr;
266 if (!p[valid_ptr].skip) {
267 /* If our only child is a leaf, make this a leaf. */
268 /* By design, we should have made this node a leaf to begin with so we
269 * should never reach here.
270 * But since it's so simple to handle this, let's do it just in case we
271 * change this rule.
272 */
273 lp->skip = 0;
274 } else {
275 lp->skip += p[valid_ptr].skip;
276 }
277}
278
279static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
280{
281 DECLARE_BITMAP(compacted, nodes_nb);
282
283 if (d->phys_map.skip) {
53cb28cb 284 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f 285 }
286}
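
/* Editor's note, an illustration rather than part of exec.c: if an
 * intermediate node has exactly one valid child, phys_page_compact() copies
 * the child's ptr into the parent and adds the skip counts, so a node with
 * skip == 1 pointing at a single child with skip == 2 becomes one entry with
 * skip == 3, and phys_page_find() later jumps three levels in a single step.
 */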
287
97115a8d 288static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 289 Node *nodes, MemoryRegionSection *sections)
92e873b9 290{
31ab2b4a 291 PhysPageEntry *p;
97115a8d 292 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 293 int i;
f1f6e3b8 294
9736e55b 295 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 296 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 297 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 298 }
9affd6fc 299 p = nodes[lp.ptr];
03f49957 300 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 301 }
b35ba30f 302
303 if (sections[lp.ptr].size.hi ||
304 range_covers_byte(sections[lp.ptr].offset_within_address_space,
305 sections[lp.ptr].size.lo, addr)) {
306 return &sections[lp.ptr];
307 } else {
308 return &sections[PHYS_SECTION_UNASSIGNED];
309 }
f3705d53 310}
311
e5548617
BS
312bool memory_region_is_unassigned(MemoryRegion *mr)
313{
2a8e7499 314 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 315 && mr != &io_mem_watch;
fd6ce8f6 316}
149f54b5 317
c7086b4a 318static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
319 hwaddr addr,
320 bool resolve_subpage)
9f029603 321{
90260c6c
JK
322 MemoryRegionSection *section;
323 subpage_t *subpage;
324
53cb28cb 325 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
326 if (resolve_subpage && section->mr->subpage) {
327 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 328 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
329 }
330 return section;
9f029603
JK
331}
332
90260c6c 333static MemoryRegionSection *
c7086b4a 334address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 335 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
336{
337 MemoryRegionSection *section;
a87f3954 338 Int128 diff;
149f54b5 339
c7086b4a 340 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
341 /* Compute offset within MemoryRegionSection */
342 addr -= section->offset_within_address_space;
343
344 /* Compute offset within MemoryRegion */
345 *xlat = addr + section->offset_within_region;
346
347 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 348 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
349 return section;
350}
90260c6c 351
a87f3954
PB
352static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
353{
354 if (memory_region_is_ram(mr)) {
355 return !(is_write && mr->readonly);
356 }
357 if (memory_region_is_romd(mr)) {
358 return !is_write;
359 }
360
361 return false;
362}
363
5c8a00ce
PB
364MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
365 hwaddr *xlat, hwaddr *plen,
366 bool is_write)
90260c6c 367{
30951157
AK
368 IOMMUTLBEntry iotlb;
369 MemoryRegionSection *section;
370 MemoryRegion *mr;
371 hwaddr len = *plen;
372
373 for (;;) {
a87f3954 374 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
375 mr = section->mr;
376
377 if (!mr->iommu_ops) {
378 break;
379 }
380
8d7b8cb9 381 iotlb = mr->iommu_ops->translate(mr, addr, is_write);
30951157
AK
382 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
383 | (addr & iotlb.addr_mask));
384 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
385 if (!(iotlb.perm & (1 << is_write))) {
386 mr = &io_mem_unassigned;
387 break;
388 }
389
390 as = iotlb.target_as;
391 }
392
fe680d0d 393 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
394 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
395 len = MIN(page, len);
396 }
397
30951157
AK
398 *plen = len;
399 *xlat = addr;
400 return mr;
90260c6c 401}
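
/* Editor's illustrative sketch, not part of exec.c: a typical caller pattern
 * for the translation routine above, loosely modelled on address_space_rw().
 * The function name example_read_bytes is hypothetical; the calls it makes
 * (address_space_translate, memory_access_is_direct, qemu_get_ram_ptr,
 * memory_region_get_ram_addr) are the ones defined or used in this file.
 */
static void example_read_bytes(AddressSpace *as, hwaddr addr,
                               uint8_t *buf, hwaddr len)
{
    while (len > 0) {
        hwaddr xlat, l = len;
        MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, false);

        if (memory_access_is_direct(mr, false)) {
            /* RAM or ROMD: copy straight from the host backing memory. */
            memcpy(buf, qemu_get_ram_ptr(memory_region_get_ram_addr(mr) + xlat), l);
        } else {
            /* MMIO: a real caller would dispatch to the region's read ops. */
        }
        addr += l;
        buf += l;
        len -= l;
    }
}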
402
403MemoryRegionSection *
9d82b5a7
PB
404address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
405 hwaddr *xlat, hwaddr *plen)
90260c6c 406{
30951157 407 MemoryRegionSection *section;
9d82b5a7
PB
408 section = address_space_translate_internal(cpu->memory_dispatch,
409 addr, xlat, plen, false);
30951157
AK
410
411 assert(!section->mr->iommu_ops);
412 return section;
90260c6c 413}
5b6dd868 414#endif
fd6ce8f6 415
5b6dd868 416void cpu_exec_init_all(void)
fdbb84d1 417{
5b6dd868 418#if !defined(CONFIG_USER_ONLY)
b2a8658e 419 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
420 memory_map_init();
421 io_mem_init();
fdbb84d1 422#endif
5b6dd868 423}
fdbb84d1 424
b170fce3 425#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
426
427static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 428{
259186a7 429 CPUState *cpu = opaque;
a513fe19 430
5b6dd868
BS
431 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
432 version_id is increased. */
259186a7 433 cpu->interrupt_request &= ~0x01;
c01a71c1 434 tlb_flush(cpu, 1);
5b6dd868
BS
435
436 return 0;
a513fe19 437}
7501267e 438
6c3bff0e
PD
439static int cpu_common_pre_load(void *opaque)
440{
441 CPUState *cpu = opaque;
442
adee6424 443 cpu->exception_index = -1;
6c3bff0e
PD
444
445 return 0;
446}
447
448static bool cpu_common_exception_index_needed(void *opaque)
449{
450 CPUState *cpu = opaque;
451
adee6424 452 return tcg_enabled() && cpu->exception_index != -1;
6c3bff0e
PD
453}
454
455static const VMStateDescription vmstate_cpu_common_exception_index = {
456 .name = "cpu_common/exception_index",
457 .version_id = 1,
458 .minimum_version_id = 1,
459 .fields = (VMStateField[]) {
460 VMSTATE_INT32(exception_index, CPUState),
461 VMSTATE_END_OF_LIST()
462 }
463};
464
1a1562f5 465const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
466 .name = "cpu_common",
467 .version_id = 1,
468 .minimum_version_id = 1,
6c3bff0e 469 .pre_load = cpu_common_pre_load,
5b6dd868 470 .post_load = cpu_common_post_load,
35d08458 471 .fields = (VMStateField[]) {
259186a7
AF
472 VMSTATE_UINT32(halted, CPUState),
473 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868 474 VMSTATE_END_OF_LIST()
6c3bff0e
PD
475 },
476 .subsections = (VMStateSubsection[]) {
477 {
478 .vmsd = &vmstate_cpu_common_exception_index,
479 .needed = cpu_common_exception_index_needed,
480 } , {
481 /* empty */
482 }
5b6dd868
BS
483 }
484};
1a1562f5 485
5b6dd868 486#endif
ea041c0e 487
38d8f5c8 488CPUState *qemu_get_cpu(int index)
ea041c0e 489{
bdc44640 490 CPUState *cpu;
ea041c0e 491
bdc44640 492 CPU_FOREACH(cpu) {
55e5c285 493 if (cpu->cpu_index == index) {
bdc44640 494 return cpu;
55e5c285 495 }
ea041c0e 496 }
5b6dd868 497
bdc44640 498 return NULL;
ea041c0e
FB
499}
500
09daed84
EI
501#if !defined(CONFIG_USER_ONLY)
502void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
503{
504 /* We only support one address space per cpu at the moment. */
505 assert(cpu->as == as);
506
507 if (cpu->tcg_as_listener) {
508 memory_listener_unregister(cpu->tcg_as_listener);
509 } else {
510 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
511 }
512 cpu->tcg_as_listener->commit = tcg_commit;
513 memory_listener_register(cpu->tcg_as_listener, as);
514}
515#endif
516
5b6dd868 517void cpu_exec_init(CPUArchState *env)
ea041c0e 518{
5b6dd868 519 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 520 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 521 CPUState *some_cpu;
5b6dd868
BS
522 int cpu_index;
523
524#if defined(CONFIG_USER_ONLY)
525 cpu_list_lock();
526#endif
5b6dd868 527 cpu_index = 0;
bdc44640 528 CPU_FOREACH(some_cpu) {
5b6dd868
BS
529 cpu_index++;
530 }
55e5c285 531 cpu->cpu_index = cpu_index;
1b1ed8dc 532 cpu->numa_node = 0;
f0c3c505 533 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 534 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 535#ifndef CONFIG_USER_ONLY
09daed84 536 cpu->as = &address_space_memory;
5b6dd868
BS
537 cpu->thread_id = qemu_get_thread_id();
538#endif
bdc44640 539 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
540#if defined(CONFIG_USER_ONLY)
541 cpu_list_unlock();
542#endif
e0d47944
AF
543 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
544 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
545 }
5b6dd868 546#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
547 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
548 cpu_save, cpu_load, env);
b170fce3 549 assert(cc->vmsd == NULL);
e0d47944 550 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 551#endif
b170fce3
AF
552 if (cc->vmsd != NULL) {
553 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
554 }
ea041c0e
FB
555}
556
94df27fd 557#if defined(CONFIG_USER_ONLY)
00b941e5 558static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
559{
560 tb_invalidate_phys_page_range(pc, pc + 1, 0);
561}
562#else
00b941e5 563static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 564{
e8262a1b
MF
565 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
566 if (phys != -1) {
09daed84 567 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 568 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 569 }
1e7855a5 570}
c27004ec 571#endif
d720b93d 572
c527ee8f 573#if defined(CONFIG_USER_ONLY)
75a34036 574void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
575
576{
577}
578
3ee887e8
PM
579int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
580 int flags)
581{
582 return -ENOSYS;
583}
584
585void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
586{
587}
588
75a34036 589int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
590 int flags, CPUWatchpoint **watchpoint)
591{
592 return -ENOSYS;
593}
594#else
6658ffb8 595/* Add a watchpoint. */
75a34036 596int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 597 int flags, CPUWatchpoint **watchpoint)
6658ffb8 598{
c0ce998e 599 CPUWatchpoint *wp;
6658ffb8 600
05068c0d 601 /* forbid ranges which are empty or run off the end of the address space */
07e2863d 602 if (len == 0 || (addr + len - 1) < addr) {
75a34036
AF
603 error_report("tried to set invalid watchpoint at %"
604 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
605 return -EINVAL;
606 }
7267c094 607 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
608
609 wp->vaddr = addr;
05068c0d 610 wp->len = len;
a1d1bb31
AL
611 wp->flags = flags;
612
2dc9f411 613 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
614 if (flags & BP_GDB) {
615 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
616 } else {
617 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
618 }
6658ffb8 619
31b030d4 620 tlb_flush_page(cpu, addr);
a1d1bb31
AL
621
622 if (watchpoint)
623 *watchpoint = wp;
624 return 0;
6658ffb8 625}
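
/* Editor's illustrative sketch, not part of exec.c: inserting and removing a
 * 4-byte write watchpoint through the API above.  The helper name and the
 * guest address are hypothetical.
 */
static void example_watch_guest_word(CPUState *cpu, vaddr guest_addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(cpu, guest_addr, 4,
                              BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        return;  /* empty or wrapping ranges are rejected with -EINVAL */
    }
    /* ... run the guest; check_watchpoint() reports hits via wp->flags ... */
    cpu_watchpoint_remove_by_ref(cpu, wp);
}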
626
a1d1bb31 627/* Remove a specific watchpoint. */
75a34036 628int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 629 int flags)
6658ffb8 630{
a1d1bb31 631 CPUWatchpoint *wp;
6658ffb8 632
ff4700b0 633 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 634 if (addr == wp->vaddr && len == wp->len
6e140f28 635 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 636 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
637 return 0;
638 }
639 }
a1d1bb31 640 return -ENOENT;
6658ffb8
PB
641}
642
a1d1bb31 643/* Remove a specific watchpoint by reference. */
75a34036 644void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 645{
ff4700b0 646 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 647
31b030d4 648 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 649
7267c094 650 g_free(watchpoint);
a1d1bb31
AL
651}
652
653/* Remove all matching watchpoints. */
75a34036 654void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 655{
c0ce998e 656 CPUWatchpoint *wp, *next;
a1d1bb31 657
ff4700b0 658 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
659 if (wp->flags & mask) {
660 cpu_watchpoint_remove_by_ref(cpu, wp);
661 }
c0ce998e 662 }
7d03f82f 663}
05068c0d 664
665/* Return true if this watchpoint address matches the specified
666 * access (i.e. the address range covered by the watchpoint overlaps
667 * partially or completely with the address range covered by the
668 * access).
669 */
670static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
671 vaddr addr,
672 vaddr len)
673{
674 /* We know the lengths are non-zero, but a little caution is
675 * required to avoid errors in the case where the range ends
676 * exactly at the top of the address space and so addr + len
677 * wraps round to zero.
678 */
679 vaddr wpend = wp->vaddr + wp->len - 1;
680 vaddr addrend = addr + len - 1;
681
682 return !(addr > wpend || wp->vaddr > addrend);
683}
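
/* Editor's note, an illustration rather than part of exec.c: with 64-bit
 * vaddr, a watchpoint at 0xfffffffffffffffc of length 4 has wpend ==
 * 0xffffffffffffffff, so an access at 0xfffffffffffffffe of length 2 still
 * matches even though addr + len wraps around to zero.
 */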
684
c527ee8f 685#endif
7d03f82f 686
a1d1bb31 687/* Add a breakpoint. */
b3310ab3 688int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 689 CPUBreakpoint **breakpoint)
4c3a88a2 690{
c0ce998e 691 CPUBreakpoint *bp;
3b46e624 692
7267c094 693 bp = g_malloc(sizeof(*bp));
4c3a88a2 694
a1d1bb31
AL
695 bp->pc = pc;
696 bp->flags = flags;
697
2dc9f411 698 /* keep all GDB-injected breakpoints in front */
00b941e5 699 if (flags & BP_GDB) {
f0c3c505 700 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 701 } else {
f0c3c505 702 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 703 }
3b46e624 704
f0c3c505 705 breakpoint_invalidate(cpu, pc);
a1d1bb31 706
00b941e5 707 if (breakpoint) {
a1d1bb31 708 *breakpoint = bp;
00b941e5 709 }
4c3a88a2 710 return 0;
4c3a88a2
FB
711}
712
a1d1bb31 713/* Remove a specific breakpoint. */
b3310ab3 714int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 715{
a1d1bb31
AL
716 CPUBreakpoint *bp;
717
f0c3c505 718 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 719 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 720 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
721 return 0;
722 }
7d03f82f 723 }
a1d1bb31 724 return -ENOENT;
7d03f82f
EI
725}
726
a1d1bb31 727/* Remove a specific breakpoint by reference. */
b3310ab3 728void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 729{
f0c3c505
AF
730 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
731
732 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 733
7267c094 734 g_free(breakpoint);
a1d1bb31
AL
735}
736
737/* Remove all matching breakpoints. */
b3310ab3 738void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 739{
c0ce998e 740 CPUBreakpoint *bp, *next;
a1d1bb31 741
f0c3c505 742 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
743 if (bp->flags & mask) {
744 cpu_breakpoint_remove_by_ref(cpu, bp);
745 }
c0ce998e 746 }
4c3a88a2
FB
747}
748
c33a346e
FB
749/* enable or disable single step mode. EXCP_DEBUG is returned by the
750 CPU loop after each instruction */
3825b28f 751void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 752{
ed2803da
AF
753 if (cpu->singlestep_enabled != enabled) {
754 cpu->singlestep_enabled = enabled;
755 if (kvm_enabled()) {
38e478ec 756 kvm_update_guest_debug(cpu, 0);
ed2803da 757 } else {
ccbb4d44 758 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 759 /* XXX: only flush what is necessary */
38e478ec 760 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
761 tb_flush(env);
762 }
c33a346e 763 }
c33a346e
FB
764}
765
a47dddd7 766void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
767{
768 va_list ap;
493ae1f0 769 va_list ap2;
7501267e
FB
770
771 va_start(ap, fmt);
493ae1f0 772 va_copy(ap2, ap);
7501267e
FB
773 fprintf(stderr, "qemu: fatal: ");
774 vfprintf(stderr, fmt, ap);
775 fprintf(stderr, "\n");
878096ee 776 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
777 if (qemu_log_enabled()) {
778 qemu_log("qemu: fatal: ");
779 qemu_log_vprintf(fmt, ap2);
780 qemu_log("\n");
a0762859 781 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 782 qemu_log_flush();
93fcfe39 783 qemu_log_close();
924edcae 784 }
493ae1f0 785 va_end(ap2);
f9373291 786 va_end(ap);
fd052bf6
RV
787#if defined(CONFIG_USER_ONLY)
788 {
789 struct sigaction act;
790 sigfillset(&act.sa_mask);
791 act.sa_handler = SIG_DFL;
792 sigaction(SIGABRT, &act, NULL);
793 }
794#endif
7501267e
FB
795 abort();
796}
797
0124311e 798#if !defined(CONFIG_USER_ONLY)
041603fe
PB
799static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
800{
801 RAMBlock *block;
802
803 /* The list is protected by the iothread lock here. */
804 block = ram_list.mru_block;
9b8424d5 805 if (block && addr - block->offset < block->max_length) {
041603fe
PB
806 goto found;
807 }
808 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 809 if (addr - block->offset < block->max_length) {
041603fe
PB
810 goto found;
811 }
812 }
813
814 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
815 abort();
816
817found:
818 ram_list.mru_block = block;
819 return block;
820}
821
a2f4d5be 822static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 823{
041603fe 824 ram_addr_t start1;
a2f4d5be
JQ
825 RAMBlock *block;
826 ram_addr_t end;
827
828 end = TARGET_PAGE_ALIGN(start + length);
829 start &= TARGET_PAGE_MASK;
d24981d3 830
041603fe
PB
831 block = qemu_get_ram_block(start);
832 assert(block == qemu_get_ram_block(end - 1));
1240be24 833 start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
041603fe 834 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
835}
836
5579c7f3 837/* Note: start and end must be within the same ram block. */
a2f4d5be 838void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 839 unsigned client)
1ccde1cb 840{
1ccde1cb
FB
841 if (length == 0)
842 return;
c8d6f66a 843 cpu_physical_memory_clear_dirty_range_type(start, length, client);
f23db169 844
d24981d3 845 if (tcg_enabled()) {
a2f4d5be 846 tlb_reset_dirty_range_all(start, length);
5579c7f3 847 }
1ccde1cb
FB
848}
849
981fdf23 850static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
851{
852 in_migration = enable;
74576198
AL
853}
854
bb0e627a 855hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
856 MemoryRegionSection *section,
857 target_ulong vaddr,
858 hwaddr paddr, hwaddr xlat,
859 int prot,
860 target_ulong *address)
e5548617 861{
a8170e5e 862 hwaddr iotlb;
e5548617
BS
863 CPUWatchpoint *wp;
864
cc5bea60 865 if (memory_region_is_ram(section->mr)) {
e5548617
BS
866 /* Normal RAM. */
867 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 868 + xlat;
e5548617 869 if (!section->readonly) {
b41aac4f 870 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 871 } else {
b41aac4f 872 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
873 }
874 } else {
1b3fb98f 875 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 876 iotlb += xlat;
e5548617
BS
877 }
878
879 /* Make accesses to pages with watchpoints go via the
880 watchpoint trap routines. */
ff4700b0 881 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d 882 if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
e5548617
BS
883 /* Avoid trapping reads of pages with a write breakpoint. */
884 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 885 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
886 *address |= TLB_MMIO;
887 break;
888 }
889 }
890 }
891
892 return iotlb;
893}
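
/* Editor's note, an illustration rather than part of exec.c: for RAM the
 * returned iotlb is the page-aligned ram_addr plus xlat with a special
 * section index ORed into the low bits, e.g. 0x1234000 | PHYS_SECTION_NOTDIRTY
 * for a writable page, while for MMIO it is the section's index in the
 * dispatch map plus xlat; phys_section_add() below asserts the section count
 * stays under TARGET_PAGE_SIZE so the two encodings cannot collide.
 */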
9fa3e853
FB
894#endif /* defined(CONFIG_USER_ONLY) */
895
e2eef170 896#if !defined(CONFIG_USER_ONLY)
8da3ff18 897
c227f099 898static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 899 uint16_t section);
acc9d80b 900static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 901
a2b257d6
IM
902static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
903 qemu_anon_ram_alloc;
91138037 904
905/*
906 * Set a custom physical guest memory allocator.
907 * Accelerators with unusual needs may need this. Hopefully, we can
908 * get rid of it eventually.
909 */
a2b257d6 910void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
91138037
MA
911{
912 phys_mem_alloc = alloc;
913}
914
53cb28cb
MA
915static uint16_t phys_section_add(PhysPageMap *map,
916 MemoryRegionSection *section)
5312bd8b 917{
68f3f65b
PB
918 /* The physical section number is ORed with a page-aligned
919 * pointer to produce the iotlb entries. Thus it should
920 * never overflow into the page-aligned value.
921 */
53cb28cb 922 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 923
53cb28cb
MA
924 if (map->sections_nb == map->sections_nb_alloc) {
925 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
926 map->sections = g_renew(MemoryRegionSection, map->sections,
927 map->sections_nb_alloc);
5312bd8b 928 }
53cb28cb 929 map->sections[map->sections_nb] = *section;
dfde4e6e 930 memory_region_ref(section->mr);
53cb28cb 931 return map->sections_nb++;
5312bd8b
AK
932}
933
058bc4b5
PB
934static void phys_section_destroy(MemoryRegion *mr)
935{
dfde4e6e
PB
936 memory_region_unref(mr);
937
058bc4b5
PB
938 if (mr->subpage) {
939 subpage_t *subpage = container_of(mr, subpage_t, iomem);
b4fefef9 940 object_unref(OBJECT(&subpage->iomem));
058bc4b5
PB
941 g_free(subpage);
942 }
943}
944
6092666e 945static void phys_sections_free(PhysPageMap *map)
5312bd8b 946{
9affd6fc
PB
947 while (map->sections_nb > 0) {
948 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
949 phys_section_destroy(section->mr);
950 }
9affd6fc
PB
951 g_free(map->sections);
952 g_free(map->nodes);
5312bd8b
AK
953}
954
ac1970fb 955static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
956{
957 subpage_t *subpage;
a8170e5e 958 hwaddr base = section->offset_within_address_space
0f0cb164 959 & TARGET_PAGE_MASK;
97115a8d 960 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 961 d->map.nodes, d->map.sections);
0f0cb164
AK
962 MemoryRegionSection subsection = {
963 .offset_within_address_space = base,
052e87b0 964 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 965 };
a8170e5e 966 hwaddr start, end;
0f0cb164 967
f3705d53 968 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 969
f3705d53 970 if (!(existing->mr->subpage)) {
acc9d80b 971 subpage = subpage_init(d->as, base);
3be91e86 972 subsection.address_space = d->as;
0f0cb164 973 subsection.mr = &subpage->iomem;
ac1970fb 974 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 975 phys_section_add(&d->map, &subsection));
0f0cb164 976 } else {
f3705d53 977 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
978 }
979 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 980 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
981 subpage_register(subpage, start, end,
982 phys_section_add(&d->map, section));
0f0cb164
AK
983}
984
985
052e87b0
PB
986static void register_multipage(AddressSpaceDispatch *d,
987 MemoryRegionSection *section)
33417e70 988{
a8170e5e 989 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 990 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
991 uint64_t num_pages = int128_get64(int128_rshift(section->size,
992 TARGET_PAGE_BITS));
dd81124b 993
733d5ef5
PB
994 assert(num_pages);
995 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
996}
997
ac1970fb 998static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 999{
89ae337a 1000 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 1001 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 1002 MemoryRegionSection now = *section, remain = *section;
052e87b0 1003 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 1004
733d5ef5
PB
1005 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1006 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1007 - now.offset_within_address_space;
1008
052e87b0 1009 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 1010 register_subpage(d, &now);
733d5ef5 1011 } else {
052e87b0 1012 now.size = int128_zero();
733d5ef5 1013 }
052e87b0
PB
1014 while (int128_ne(remain.size, now.size)) {
1015 remain.size = int128_sub(remain.size, now.size);
1016 remain.offset_within_address_space += int128_get64(now.size);
1017 remain.offset_within_region += int128_get64(now.size);
69b67646 1018 now = remain;
052e87b0 1019 if (int128_lt(remain.size, page_size)) {
733d5ef5 1020 register_subpage(d, &now);
88266249 1021 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 1022 now.size = page_size;
ac1970fb 1023 register_subpage(d, &now);
69b67646 1024 } else {
052e87b0 1025 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 1026 register_multipage(d, &now);
69b67646 1027 }
0f0cb164 1028 }
1029}
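
/* Editor's note, an illustration rather than part of exec.c: with 4KiB target
 * pages, a section covering [0x1800, 0x5400) is registered above as a subpage
 * for the unaligned head [0x1800, 0x2000), a full-page mapping for
 * [0x2000, 0x5000), and a second subpage for the tail [0x5000, 0x5400).
 */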
1030
62a2744c
SY
1031void qemu_flush_coalesced_mmio_buffer(void)
1032{
1033 if (kvm_enabled())
1034 kvm_flush_coalesced_mmio_buffer();
1035}
1036
b2a8658e
UD
1037void qemu_mutex_lock_ramlist(void)
1038{
1039 qemu_mutex_lock(&ram_list.mutex);
1040}
1041
1042void qemu_mutex_unlock_ramlist(void)
1043{
1044 qemu_mutex_unlock(&ram_list.mutex);
1045}
1046
e1e84ba0 1047#ifdef __linux__
c902760f
MT
1048
1049#include <sys/vfs.h>
1050
1051#define HUGETLBFS_MAGIC 0x958458f6
1052
fc7a5800 1053static long gethugepagesize(const char *path, Error **errp)
c902760f
MT
1054{
1055 struct statfs fs;
1056 int ret;
1057
1058 do {
9742bf26 1059 ret = statfs(path, &fs);
c902760f
MT
1060 } while (ret != 0 && errno == EINTR);
1061
1062 if (ret != 0) {
fc7a5800
HT
1063 error_setg_errno(errp, errno, "failed to get page size of file %s",
1064 path);
9742bf26 1065 return 0;
c902760f
MT
1066 }
1067
1068 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1069 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1070
1071 return fs.f_bsize;
1072}
1073
04b16653
AW
1074static void *file_ram_alloc(RAMBlock *block,
1075 ram_addr_t memory,
7f56e740
PB
1076 const char *path,
1077 Error **errp)
c902760f
MT
1078{
1079 char *filename;
8ca761f6
PF
1080 char *sanitized_name;
1081 char *c;
557529dd 1082 void *area = NULL;
c902760f 1083 int fd;
557529dd 1084 uint64_t hpagesize;
fc7a5800 1085 Error *local_err = NULL;
c902760f 1086
fc7a5800
HT
1087 hpagesize = gethugepagesize(path, &local_err);
1088 if (local_err) {
1089 error_propagate(errp, local_err);
f9a49dfa 1090 goto error;
c902760f 1091 }
a2b257d6 1092 block->mr->align = hpagesize;
c902760f
MT
1093
1094 if (memory < hpagesize) {
557529dd
HT
1095 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1096 "or larger than huge page size 0x%" PRIx64,
1097 memory, hpagesize);
1098 goto error;
c902760f
MT
1099 }
1100
1101 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1102 error_setg(errp,
1103 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1104 goto error;
c902760f
MT
1105 }
1106
8ca761f6 1107 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
83234bf2 1108 sanitized_name = g_strdup(memory_region_name(block->mr));
8ca761f6
PF
1109 for (c = sanitized_name; *c != '\0'; c++) {
1110 if (*c == '/')
1111 *c = '_';
1112 }
1113
1114 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1115 sanitized_name);
1116 g_free(sanitized_name);
c902760f
MT
1117
1118 fd = mkstemp(filename);
1119 if (fd < 0) {
7f56e740
PB
1120 error_setg_errno(errp, errno,
1121 "unable to create backing store for hugepages");
e4ada482 1122 g_free(filename);
f9a49dfa 1123 goto error;
c902760f
MT
1124 }
1125 unlink(filename);
e4ada482 1126 g_free(filename);
c902760f
MT
1127
1128 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1129
1130 /*
1131 * ftruncate is not supported by hugetlbfs in older
1132 * hosts, so don't bother bailing out on errors.
1133 * If anything goes wrong with it under other filesystems,
1134 * mmap will fail.
1135 */
7f56e740 1136 if (ftruncate(fd, memory)) {
9742bf26 1137 perror("ftruncate");
7f56e740 1138 }
c902760f 1139
dbcb8981
PB
1140 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1141 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1142 fd, 0);
c902760f 1143 if (area == MAP_FAILED) {
7f56e740
PB
1144 error_setg_errno(errp, errno,
1145 "unable to map backing store for hugepages");
9742bf26 1146 close(fd);
f9a49dfa 1147 goto error;
c902760f 1148 }
ef36fa14
MT
1149
1150 if (mem_prealloc) {
38183310 1151 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1152 }
1153
04b16653 1154 block->fd = fd;
c902760f 1155 return area;
f9a49dfa
MT
1156
1157error:
1158 if (mem_prealloc) {
e4d9df4f 1159 error_report("%s\n", error_get_pretty(*errp));
f9a49dfa
MT
1160 exit(1);
1161 }
1162 return NULL;
c902760f
MT
1163}
1164#endif
1165
d17b5288 1166static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1167{
1168 RAMBlock *block, *next_block;
3e837b2c 1169 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1170
49cd9ac6
SH
1171 assert(size != 0); /* it would hand out same offset multiple times */
1172
a3161038 1173 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1174 return 0;
1175
a3161038 1176 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1177 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653 1178
62be4e3a 1179 end = block->offset + block->max_length;
04b16653 1180
a3161038 1181 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1182 if (next_block->offset >= end) {
1183 next = MIN(next, next_block->offset);
1184 }
1185 }
1186 if (next - end >= size && next - end < mingap) {
3e837b2c 1187 offset = end;
04b16653
AW
1188 mingap = next - end;
1189 }
1190 }
3e837b2c
AW
1191
1192 if (offset == RAM_ADDR_MAX) {
1193 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1194 (uint64_t)size);
1195 abort();
1196 }
1197
04b16653 1198 return offset;
1199}
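
/* Editor's note, an illustration rather than part of exec.c: with existing
 * blocks at [0x0, 0x8000000) and [0x10000000, 0x18000000), a request for
 * 0x4000000 bytes returns 0x8000000, the start of the smallest gap that can
 * hold it.
 */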
1200
652d7ec2 1201ram_addr_t last_ram_offset(void)
d17b5288
AW
1202{
1203 RAMBlock *block;
1204 ram_addr_t last = 0;
1205
a3161038 1206 QTAILQ_FOREACH(block, &ram_list.blocks, next)
62be4e3a 1207 last = MAX(last, block->offset + block->max_length);
d17b5288
AW
1208
1209 return last;
1210}
1211
ddb97f1d
JB
1212static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1213{
1214 int ret;
ddb97f1d
JB
1215
1216 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1217 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1218 "dump-guest-core", true)) {
ddb97f1d
JB
1219 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1220 if (ret) {
1221 perror("qemu_madvise");
1222 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1223 "but dump_guest_core=off specified\n");
1224 }
1225 }
1226}
1227
20cfe881 1228static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1229{
20cfe881 1230 RAMBlock *block;
84b89d78 1231
a3161038 1232 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1233 if (block->offset == addr) {
20cfe881 1234 return block;
c5705a77
AK
1235 }
1236 }
20cfe881
HT
1237
1238 return NULL;
1239}
1240
1241void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1242{
1243 RAMBlock *new_block = find_ram_block(addr);
1244 RAMBlock *block;
1245
c5705a77
AK
1246 assert(new_block);
1247 assert(!new_block->idstr[0]);
84b89d78 1248
09e5ab63
AL
1249 if (dev) {
1250 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1251 if (id) {
1252 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1253 g_free(id);
84b89d78
CM
1254 }
1255 }
1256 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1257
b2a8658e
UD
1258 /* This assumes the iothread lock is taken here too. */
1259 qemu_mutex_lock_ramlist();
a3161038 1260 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1261 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1262 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1263 new_block->idstr);
1264 abort();
1265 }
1266 }
b2a8658e 1267 qemu_mutex_unlock_ramlist();
c5705a77
AK
1268}
1269
20cfe881
HT
1270void qemu_ram_unset_idstr(ram_addr_t addr)
1271{
1272 RAMBlock *block = find_ram_block(addr);
1273
1274 if (block) {
1275 memset(block->idstr, 0, sizeof(block->idstr));
1276 }
1277}
1278
8490fc78
LC
1279static int memory_try_enable_merging(void *addr, size_t len)
1280{
2ff3de68 1281 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1282 /* disabled by the user */
1283 return 0;
1284 }
1285
1286 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1287}
1288
62be4e3a
MT
1289/* Only legal before guest might have detected the memory size: e.g. on
1290 * incoming migration, or right after reset.
1291 *
1292 * As memory core doesn't know how is memory accessed, it is up to
1293 * resize callback to update device state and/or add assertions to detect
1294 * misuse, if necessary.
1295 */
1296int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1297{
1298 RAMBlock *block = find_ram_block(base);
1299
1300 assert(block);
1301
1302 if (block->used_length == newsize) {
1303 return 0;
1304 }
1305
1306 if (!(block->flags & RAM_RESIZEABLE)) {
1307 error_setg_errno(errp, EINVAL,
1308 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1309 " in != 0x" RAM_ADDR_FMT, block->idstr,
1310 newsize, block->used_length);
1311 return -EINVAL;
1312 }
1313
1314 if (block->max_length < newsize) {
1315 error_setg_errno(errp, EINVAL,
1316 "Length too large: %s: 0x" RAM_ADDR_FMT
1317 " > 0x" RAM_ADDR_FMT, block->idstr,
1318 newsize, block->max_length);
1319 return -EINVAL;
1320 }
1321
1322 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1323 block->used_length = newsize;
1324 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1325 memory_region_set_size(block->mr, newsize);
1326 if (block->resized) {
1327 block->resized(block->idstr, newsize, block->host);
1328 }
1329 return 0;
1330}
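
/* Editor's illustrative sketch, not part of exec.c: how a caller might grow a
 * block created with qemu_ram_alloc_resizeable().  The helper name and sizes
 * are hypothetical; error reporting mirrors the pattern used elsewhere in
 * this file.
 */
static void example_grow_ram(ram_addr_t base, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, newsize, &err) < 0) {
        /* fails if the block is not RAM_RESIZEABLE or newsize > max_length */
        error_report("%s", error_get_pretty(err));
        error_free(err);
    }
}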
1331
ef701d7b 1332static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
c5705a77 1333{
e1c57ab8 1334 RAMBlock *block;
2152f5ca
JQ
1335 ram_addr_t old_ram_size, new_ram_size;
1336
1337 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1338
b2a8658e
UD
1339 /* This assumes the iothread lock is taken here too. */
1340 qemu_mutex_lock_ramlist();
9b8424d5 1341 new_block->offset = find_ram_offset(new_block->max_length);
e1c57ab8
PB
1342
1343 if (!new_block->host) {
1344 if (xen_enabled()) {
9b8424d5
MT
1345 xen_ram_alloc(new_block->offset, new_block->max_length,
1346 new_block->mr);
e1c57ab8 1347 } else {
9b8424d5 1348 new_block->host = phys_mem_alloc(new_block->max_length,
a2b257d6 1349 &new_block->mr->align);
39228250 1350 if (!new_block->host) {
ef701d7b
HT
1351 error_setg_errno(errp, errno,
1352 "cannot set up guest memory '%s'",
1353 memory_region_name(new_block->mr));
1354 qemu_mutex_unlock_ramlist();
1355 return -1;
39228250 1356 }
9b8424d5 1357 memory_try_enable_merging(new_block->host, new_block->max_length);
6977dfe6 1358 }
c902760f 1359 }
94a6b54f 1360
abb26d63
PB
1361 /* Keep the list sorted from biggest to smallest block. */
1362 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 1363 if (block->max_length < new_block->max_length) {
abb26d63
PB
1364 break;
1365 }
1366 }
1367 if (block) {
1368 QTAILQ_INSERT_BEFORE(block, new_block, next);
1369 } else {
1370 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1371 }
0d6d3c87 1372 ram_list.mru_block = NULL;
94a6b54f 1373
f798b07f 1374 ram_list.version++;
b2a8658e 1375 qemu_mutex_unlock_ramlist();
f798b07f 1376
2152f5ca
JQ
1377 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1378
1379 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1380 int i;
1381 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1382 ram_list.dirty_memory[i] =
1383 bitmap_zero_extend(ram_list.dirty_memory[i],
1384 old_ram_size, new_ram_size);
1385 }
2152f5ca 1386 }
9b8424d5
MT
1387 cpu_physical_memory_set_dirty_range(new_block->offset,
1388 new_block->used_length);
94a6b54f 1389
a904c911
PB
1390 if (new_block->host) {
1391 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1392 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1393 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1394 if (kvm_enabled()) {
1395 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1396 }
e1c57ab8 1397 }
6f0437e8 1398
94a6b54f
PB
1399 return new_block->offset;
1400}
e9a1ab19 1401
0b183fc8 1402#ifdef __linux__
e1c57ab8 1403ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1404 bool share, const char *mem_path,
7f56e740 1405 Error **errp)
e1c57ab8
PB
1406{
1407 RAMBlock *new_block;
ef701d7b
HT
1408 ram_addr_t addr;
1409 Error *local_err = NULL;
e1c57ab8
PB
1410
1411 if (xen_enabled()) {
7f56e740
PB
1412 error_setg(errp, "-mem-path not supported with Xen");
1413 return -1;
e1c57ab8
PB
1414 }
1415
1416 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1417 /*
1418 * file_ram_alloc() needs to allocate just like
1419 * phys_mem_alloc, but we haven't bothered to provide
1420 * a hook there.
1421 */
7f56e740
PB
1422 error_setg(errp,
1423 "-mem-path not supported with this accelerator");
1424 return -1;
e1c57ab8
PB
1425 }
1426
1427 size = TARGET_PAGE_ALIGN(size);
1428 new_block = g_malloc0(sizeof(*new_block));
1429 new_block->mr = mr;
9b8424d5
MT
1430 new_block->used_length = size;
1431 new_block->max_length = size;
dbcb8981 1432 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1433 new_block->host = file_ram_alloc(new_block, size,
1434 mem_path, errp);
1435 if (!new_block->host) {
1436 g_free(new_block);
1437 return -1;
1438 }
1439
ef701d7b
HT
1440 addr = ram_block_add(new_block, &local_err);
1441 if (local_err) {
1442 g_free(new_block);
1443 error_propagate(errp, local_err);
1444 return -1;
1445 }
1446 return addr;
e1c57ab8 1447}
0b183fc8 1448#endif
e1c57ab8 1449
62be4e3a
MT
1450static
1451ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1452 void (*resized)(const char*,
1453 uint64_t length,
1454 void *host),
1455 void *host, bool resizeable,
ef701d7b 1456 MemoryRegion *mr, Error **errp)
e1c57ab8
PB
1457{
1458 RAMBlock *new_block;
ef701d7b
HT
1459 ram_addr_t addr;
1460 Error *local_err = NULL;
e1c57ab8
PB
1461
1462 size = TARGET_PAGE_ALIGN(size);
62be4e3a 1463 max_size = TARGET_PAGE_ALIGN(max_size);
e1c57ab8
PB
1464 new_block = g_malloc0(sizeof(*new_block));
1465 new_block->mr = mr;
62be4e3a 1466 new_block->resized = resized;
9b8424d5
MT
1467 new_block->used_length = size;
1468 new_block->max_length = max_size;
62be4e3a 1469 assert(max_size >= size);
e1c57ab8
PB
1470 new_block->fd = -1;
1471 new_block->host = host;
1472 if (host) {
7bd4f430 1473 new_block->flags |= RAM_PREALLOC;
e1c57ab8 1474 }
62be4e3a
MT
1475 if (resizeable) {
1476 new_block->flags |= RAM_RESIZEABLE;
1477 }
ef701d7b
HT
1478 addr = ram_block_add(new_block, &local_err);
1479 if (local_err) {
1480 g_free(new_block);
1481 error_propagate(errp, local_err);
1482 return -1;
1483 }
1484 return addr;
e1c57ab8
PB
1485}
1486
62be4e3a
MT
1487ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1488 MemoryRegion *mr, Error **errp)
1489{
1490 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1491}
1492
ef701d7b 1493ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
6977dfe6 1494{
62be4e3a
MT
1495 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1496}
1497
1498ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1499 void (*resized)(const char*,
1500 uint64_t length,
1501 void *host),
1502 MemoryRegion *mr, Error **errp)
1503{
1504 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
6977dfe6
YT
1505}
1506
1f2e98b6
AW
1507void qemu_ram_free_from_ptr(ram_addr_t addr)
1508{
1509 RAMBlock *block;
1510
b2a8658e
UD
1511 /* This assumes the iothread lock is taken here too. */
1512 qemu_mutex_lock_ramlist();
a3161038 1513 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1514 if (addr == block->offset) {
a3161038 1515 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1516 ram_list.mru_block = NULL;
f798b07f 1517 ram_list.version++;
7267c094 1518 g_free(block);
b2a8658e 1519 break;
1f2e98b6
AW
1520 }
1521 }
b2a8658e 1522 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1523}
1524
c227f099 1525void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1526{
04b16653
AW
1527 RAMBlock *block;
1528
b2a8658e
UD
1529 /* This assumes the iothread lock is taken here too. */
1530 qemu_mutex_lock_ramlist();
a3161038 1531 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1532 if (addr == block->offset) {
a3161038 1533 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1534 ram_list.mru_block = NULL;
f798b07f 1535 ram_list.version++;
7bd4f430 1536 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1537 ;
dfeaf2ab
MA
1538 } else if (xen_enabled()) {
1539 xen_invalidate_map_cache_entry(block->host);
089f3f76 1540#ifndef _WIN32
3435f395 1541 } else if (block->fd >= 0) {
9b8424d5 1542 munmap(block->host, block->max_length);
3435f395 1543 close(block->fd);
089f3f76 1544#endif
04b16653 1545 } else {
9b8424d5 1546 qemu_anon_ram_free(block->host, block->max_length);
04b16653 1547 }
7267c094 1548 g_free(block);
b2a8658e 1549 break;
04b16653
AW
1550 }
1551 }
b2a8658e 1552 qemu_mutex_unlock_ramlist();
04b16653 1553
e9a1ab19
FB
1554}
1555
cd19cfa2
HY
1556#ifndef _WIN32
1557void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1558{
1559 RAMBlock *block;
1560 ram_addr_t offset;
1561 int flags;
1562 void *area, *vaddr;
1563
a3161038 1564 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2 1565 offset = addr - block->offset;
9b8424d5 1566 if (offset < block->max_length) {
1240be24 1567 vaddr = ramblock_ptr(block, offset);
7bd4f430 1568 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1569 ;
dfeaf2ab
MA
1570 } else if (xen_enabled()) {
1571 abort();
cd19cfa2
HY
1572 } else {
1573 flags = MAP_FIXED;
1574 munmap(vaddr, length);
3435f395 1575 if (block->fd >= 0) {
dbcb8981
PB
1576 flags |= (block->flags & RAM_SHARED ?
1577 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1578 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1579 flags, block->fd, offset);
cd19cfa2 1580 } else {
2eb9fbaa
MA
1581 /*
1582 * Remap needs to match alloc. Accelerators that
1583 * set phys_mem_alloc never remap. If they did,
1584 * we'd need a remap hook here.
1585 */
1586 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1587
cd19cfa2
HY
1588 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1589 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1590 flags, -1, 0);
cd19cfa2
HY
1591 }
1592 if (area != vaddr) {
f15fbc4b
AP
1593 fprintf(stderr, "Could not remap addr: "
1594 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1595 length, addr);
1596 exit(1);
1597 }
8490fc78 1598 memory_try_enable_merging(vaddr, length);
ddb97f1d 1599 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1600 }
1601 return;
1602 }
1603 }
1604}
1605#endif /* !_WIN32 */
1606
a35ba7be
PB
1607int qemu_get_ram_fd(ram_addr_t addr)
1608{
1609 RAMBlock *block = qemu_get_ram_block(addr);
1610
1611 return block->fd;
1612}
1613
3fd74b84
DM
1614void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1615{
1616 RAMBlock *block = qemu_get_ram_block(addr);
1617
1240be24 1618 return ramblock_ptr(block, 0);
3fd74b84
DM
1619}
1620
1b5ec234
PB
1621/* Return a host pointer to ram allocated with qemu_ram_alloc.
1622 With the exception of the softmmu code in this file, this should
1623 only be used for local memory (e.g. video ram) that the device owns,
1624 and knows it isn't going to access beyond the end of the block.
1625
1626 It should not be used for general purpose DMA.
1627 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1628 */
1629void *qemu_get_ram_ptr(ram_addr_t addr)
1630{
1631 RAMBlock *block = qemu_get_ram_block(addr);
1632
0d6d3c87
PB
1633 if (xen_enabled()) {
1634 /* We need to check if the requested address is in the RAM
1635 * because we don't want to map the entire memory in QEMU.
1636 * In that case just map until the end of the page.
1637 */
1638 if (block->offset == 0) {
1639 return xen_map_cache(addr, 0, 0);
1640 } else if (block->host == NULL) {
1641 block->host =
9b8424d5 1642 xen_map_cache(block->offset, block->max_length, 1);
0d6d3c87
PB
1643 }
1644 }
1240be24 1645 return ramblock_ptr(block, addr - block->offset);
dc828ca1
PB
1646}
1647
38bee5dc
SS
1648/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1649 * but takes a size argument */
cb85f7ab 1650static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1651{
8ab934f9
SS
1652 if (*size == 0) {
1653 return NULL;
1654 }
868bb33f 1655 if (xen_enabled()) {
e41d7c69 1656 return xen_map_cache(addr, *size, 1);
868bb33f 1657 } else {
38bee5dc
SS
1658 RAMBlock *block;
1659
a3161038 1660 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5
MT
1661 if (addr - block->offset < block->max_length) {
1662 if (addr - block->offset + *size > block->max_length)
1663 *size = block->max_length - addr + block->offset;
1240be24 1664 return ramblock_ptr(block, addr - block->offset);
38bee5dc
SS
1665 }
1666 }
1667
1668 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1669 abort();
38bee5dc
SS
1670 }
1671}
1672
7443b437
PB
1673/* Some of the softmmu routines need to translate from a host pointer
1674 (typically a TLB entry) back to a ram offset. */
1b5ec234 1675MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1676{
94a6b54f
PB
1677 RAMBlock *block;
1678 uint8_t *host = ptr;
1679
868bb33f 1680 if (xen_enabled()) {
e41d7c69 1681 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1682 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1683 }
1684
23887b79 1685 block = ram_list.mru_block;
9b8424d5 1686 if (block && block->host && host - block->host < block->max_length) {
23887b79
PB
1687 goto found;
1688 }
1689
a3161038 1690 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c 1691 /* This case happens when the block is not mapped. */
1692 if (block->host == NULL) {
1693 continue;
1694 }
9b8424d5 1695 if (host - block->host < block->max_length) {
23887b79 1696 goto found;
f471a17e 1697 }
94a6b54f 1698 }
432d268c 1699
1b5ec234 1700 return NULL;
23887b79
PB
1701
1702found:
1703 *ram_addr = block->offset + (host - block->host);
1b5ec234 1704 return block->mr;
e890261f 1705}
f471a17e 1706
a8170e5e 1707static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1708 uint64_t val, unsigned size)
9fa3e853 1709{
52159192 1710 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1711 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1712 }
0e0df1e2
AK
1713 switch (size) {
1714 case 1:
1715 stb_p(qemu_get_ram_ptr(ram_addr), val);
1716 break;
1717 case 2:
1718 stw_p(qemu_get_ram_ptr(ram_addr), val);
1719 break;
1720 case 4:
1721 stl_p(qemu_get_ram_ptr(ram_addr), val);
1722 break;
1723 default:
1724 abort();
3a7d929e 1725 }
6886867e 1726 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
f23db169
FB
1727 /* we remove the notdirty callback only if the code has been
1728 flushed */
a2cd8c85 1729 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1730 CPUArchState *env = current_cpu->env_ptr;
93afeade 1731 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1732 }
9fa3e853
FB
1733}
1734
b018ddf6
PB
1735static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1736 unsigned size, bool is_write)
1737{
1738 return is_write;
1739}
1740
0e0df1e2 1741static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1742 .write = notdirty_mem_write,
b018ddf6 1743 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1744 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1745};
1746
0f459d16 1747/* Generate a debug exception if a watchpoint has been hit. */
05068c0d 1748static void check_watchpoint(int offset, int len, int flags)
0f459d16 1749{
93afeade
AF
1750 CPUState *cpu = current_cpu;
1751 CPUArchState *env = cpu->env_ptr;
06d55cc1 1752 target_ulong pc, cs_base;
0f459d16 1753 target_ulong vaddr;
a1d1bb31 1754 CPUWatchpoint *wp;
06d55cc1 1755 int cpu_flags;
0f459d16 1756
ff4700b0 1757 if (cpu->watchpoint_hit) {
06d55cc1
AL
1758 /* We re-entered the check after replacing the TB. Now raise
 1759 * the debug interrupt so that it will trigger after the
1760 * current instruction. */
93afeade 1761 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1762 return;
1763 }
93afeade 1764 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1765 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
05068c0d
PM
1766 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1767 && (wp->flags & flags)) {
08225676
PM
1768 if (flags == BP_MEM_READ) {
1769 wp->flags |= BP_WATCHPOINT_HIT_READ;
1770 } else {
1771 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1772 }
1773 wp->hitaddr = vaddr;
ff4700b0
AF
1774 if (!cpu->watchpoint_hit) {
1775 cpu->watchpoint_hit = wp;
239c51a5 1776 tb_check_watchpoint(cpu);
6e140f28 1777 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1778 cpu->exception_index = EXCP_DEBUG;
5638d180 1779 cpu_loop_exit(cpu);
6e140f28
AL
1780 } else {
1781 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1782 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1783 cpu_resume_from_signal(cpu, NULL);
6e140f28 1784 }
06d55cc1 1785 }
6e140f28
AL
1786 } else {
1787 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1788 }
1789 }
1790}
1791
6658ffb8
PB
1792/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1793 so these check for a hit then pass through to the normal out-of-line
1794 phys routines. */
a8170e5e 1795static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1796 unsigned size)
6658ffb8 1797{
05068c0d 1798 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_READ);
1ec9b909 1799 switch (size) {
2c17449b 1800 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1801 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1802 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1803 default: abort();
1804 }
6658ffb8
PB
1805}
1806
a8170e5e 1807static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1808 uint64_t val, unsigned size)
6658ffb8 1809{
05068c0d 1810 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, BP_MEM_WRITE);
1ec9b909 1811 switch (size) {
67364150 1812 case 1:
db3be60d 1813 stb_phys(&address_space_memory, addr, val);
67364150
MF
1814 break;
1815 case 2:
5ce5944d 1816 stw_phys(&address_space_memory, addr, val);
67364150
MF
1817 break;
1818 case 4:
ab1da857 1819 stl_phys(&address_space_memory, addr, val);
67364150 1820 break;
1ec9b909
AK
1821 default: abort();
1822 }
6658ffb8
PB
1823}
1824
1ec9b909
AK
1825static const MemoryRegionOps watch_mem_ops = {
1826 .read = watch_mem_read,
1827 .write = watch_mem_write,
1828 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1829};
6658ffb8 1830
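/* Illustrative usage sketch (not part of exec.c): how a debugger front end
 * would arm the machinery above.  This assumes the cpu_watchpoint_insert()
 * and cpu_watchpoint_remove_by_ref() prototypes from include/qom/cpu.h; once
 * the watchpoint is inserted, the TLB trick steers guest accesses through
 * watch_mem_ops so check_watchpoint() runs before the real access. */
static CPUWatchpoint *example_wp;

static void example_arm_watchpoint(CPUState *cpu, vaddr guest_va)
{
    /* watch 4 bytes at guest_va for writes, on behalf of the gdb stub */
    if (cpu_watchpoint_insert(cpu, guest_va, 4,
                              BP_MEM_WRITE | BP_GDB, &example_wp) < 0) {
        /* length/alignment rejected by the target */
    }
}

static void example_disarm_watchpoint(CPUState *cpu)
{
    cpu_watchpoint_remove_by_ref(cpu, example_wp);
}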
a8170e5e 1831static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1832 unsigned len)
db7b5426 1833{
acc9d80b 1834 subpage_t *subpage = opaque;
ff6cff75 1835 uint8_t buf[8];
791af8c8 1836
db7b5426 1837#if defined(DEBUG_SUBPAGE)
016e9d62 1838 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1839 subpage, len, addr);
db7b5426 1840#endif
acc9d80b
JK
1841 address_space_read(subpage->as, addr + subpage->base, buf, len);
1842 switch (len) {
1843 case 1:
1844 return ldub_p(buf);
1845 case 2:
1846 return lduw_p(buf);
1847 case 4:
1848 return ldl_p(buf);
ff6cff75
PB
1849 case 8:
1850 return ldq_p(buf);
acc9d80b
JK
1851 default:
1852 abort();
1853 }
db7b5426
BS
1854}
1855
a8170e5e 1856static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1857 uint64_t value, unsigned len)
db7b5426 1858{
acc9d80b 1859 subpage_t *subpage = opaque;
ff6cff75 1860 uint8_t buf[8];
acc9d80b 1861
db7b5426 1862#if defined(DEBUG_SUBPAGE)
016e9d62 1863 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1864 " value %"PRIx64"\n",
1865 __func__, subpage, len, addr, value);
db7b5426 1866#endif
acc9d80b
JK
1867 switch (len) {
1868 case 1:
1869 stb_p(buf, value);
1870 break;
1871 case 2:
1872 stw_p(buf, value);
1873 break;
1874 case 4:
1875 stl_p(buf, value);
1876 break;
ff6cff75
PB
1877 case 8:
1878 stq_p(buf, value);
1879 break;
acc9d80b
JK
1880 default:
1881 abort();
1882 }
1883 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1884}
1885
c353e4cc 1886static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1887 unsigned len, bool is_write)
c353e4cc 1888{
acc9d80b 1889 subpage_t *subpage = opaque;
c353e4cc 1890#if defined(DEBUG_SUBPAGE)
016e9d62 1891 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1892 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1893#endif
1894
acc9d80b 1895 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1896 len, is_write);
c353e4cc
PB
1897}
1898
70c68e44
AK
1899static const MemoryRegionOps subpage_ops = {
1900 .read = subpage_read,
1901 .write = subpage_write,
ff6cff75
PB
1902 .impl.min_access_size = 1,
1903 .impl.max_access_size = 8,
1904 .valid.min_access_size = 1,
1905 .valid.max_access_size = 8,
c353e4cc 1906 .valid.accepts = subpage_accepts,
70c68e44 1907 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1908};
1909
c227f099 1910static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1911 uint16_t section)
db7b5426
BS
1912{
1913 int idx, eidx;
1914
1915 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1916 return -1;
1917 idx = SUBPAGE_IDX(start);
1918 eidx = SUBPAGE_IDX(end);
1919#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1920 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1921 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1922#endif
db7b5426 1923 for (; idx <= eidx; idx++) {
5312bd8b 1924 mmio->sub_section[idx] = section;
db7b5426
BS
1925 }
1926
1927 return 0;
1928}
1929
acc9d80b 1930static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1931{
c227f099 1932 subpage_t *mmio;
db7b5426 1933
7267c094 1934 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1935
acc9d80b 1936 mmio->as = as;
1eec614b 1937 mmio->base = base;
2c9b15ca 1938 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
b4fefef9 1939 NULL, TARGET_PAGE_SIZE);
b3b00c78 1940 mmio->iomem.subpage = true;
db7b5426 1941#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1942 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1943 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1944#endif
b41aac4f 1945 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1946
1947 return mmio;
1948}
1949
a656e22f
PC
1950static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1951 MemoryRegion *mr)
5312bd8b 1952{
a656e22f 1953 assert(as);
5312bd8b 1954 MemoryRegionSection section = {
a656e22f 1955 .address_space = as,
5312bd8b
AK
1956 .mr = mr,
1957 .offset_within_address_space = 0,
1958 .offset_within_region = 0,
052e87b0 1959 .size = int128_2_64(),
5312bd8b
AK
1960 };
1961
53cb28cb 1962 return phys_section_add(map, &section);
5312bd8b
AK
1963}
1964
9d82b5a7 1965MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
aa102231 1966{
9d82b5a7
PB
1967 MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
1968
1969 return sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1970}
1971
e9179ce1
AK
1972static void io_mem_init(void)
1973{
1f6245e5 1974 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
2c9b15ca 1975 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
1f6245e5 1976 NULL, UINT64_MAX);
2c9b15ca 1977 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
1f6245e5 1978 NULL, UINT64_MAX);
2c9b15ca 1979 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1f6245e5 1980 NULL, UINT64_MAX);
e9179ce1
AK
1981}
1982
ac1970fb 1983static void mem_begin(MemoryListener *listener)
00752703
PB
1984{
1985 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1986 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1987 uint16_t n;
1988
a656e22f 1989 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1990 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1991 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1992 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1993 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1994 assert(n == PHYS_SECTION_ROM);
a656e22f 1995 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1996 assert(n == PHYS_SECTION_WATCH);
00752703 1997
9736e55b 1998 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1999 d->as = as;
2000 as->next_dispatch = d;
2001}
2002
2003static void mem_commit(MemoryListener *listener)
ac1970fb 2004{
89ae337a 2005 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
2006 AddressSpaceDispatch *cur = as->dispatch;
2007 AddressSpaceDispatch *next = as->next_dispatch;
2008
53cb28cb 2009 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 2010
0475d94f 2011 as->dispatch = next;
b41aac4f 2012
53cb28cb
MA
2013 if (cur) {
2014 phys_sections_free(&cur->map);
2015 g_free(cur);
2016 }
9affd6fc
PB
2017}
2018
1d71148e 2019static void tcg_commit(MemoryListener *listener)
50c1e149 2020{
182735ef 2021 CPUState *cpu;
117712c3
AK
2022
2023 /* since each CPU stores ram addresses in its TLB cache, we must
2024 reset the modified entries */
 2025 /* XXX: slow! */
bdc44640 2026 CPU_FOREACH(cpu) {
33bde2e1
EI
 2027 /* FIXME: Disentangle the cpu.h circular file deps so we can
2028 directly get the right CPU from listener. */
2029 if (cpu->tcg_as_listener != listener) {
2030 continue;
2031 }
76e5c76f 2032 cpu_reload_memory_map(cpu);
117712c3 2033 }
50c1e149
AK
2034}
2035
93632747
AK
2036static void core_log_global_start(MemoryListener *listener)
2037{
981fdf23 2038 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
2039}
2040
2041static void core_log_global_stop(MemoryListener *listener)
2042{
981fdf23 2043 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
2044}
2045
93632747 2046static MemoryListener core_memory_listener = {
93632747
AK
2047 .log_global_start = core_log_global_start,
2048 .log_global_stop = core_log_global_stop,
ac1970fb 2049 .priority = 1,
93632747
AK
2050};
2051
ac1970fb
AK
2052void address_space_init_dispatch(AddressSpace *as)
2053{
00752703 2054 as->dispatch = NULL;
89ae337a 2055 as->dispatch_listener = (MemoryListener) {
ac1970fb 2056 .begin = mem_begin,
00752703 2057 .commit = mem_commit,
ac1970fb
AK
2058 .region_add = mem_add,
2059 .region_nop = mem_add,
2060 .priority = 0,
2061 };
89ae337a 2062 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
2063}
2064
6e48e8f9
PB
2065void address_space_unregister(AddressSpace *as)
2066{
2067 memory_listener_unregister(&as->dispatch_listener);
2068}
2069
83f3c251
AK
2070void address_space_destroy_dispatch(AddressSpace *as)
2071{
2072 AddressSpaceDispatch *d = as->dispatch;
2073
83f3c251
AK
2074 g_free(d);
2075 as->dispatch = NULL;
2076}
2077
62152b8a
AK
2078static void memory_map_init(void)
2079{
7267c094 2080 system_memory = g_malloc(sizeof(*system_memory));
03f49957 2081
57271d63 2082 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 2083 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 2084
7267c094 2085 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
2086 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2087 65536);
7dca8043 2088 address_space_init(&address_space_io, system_io, "I/O");
93632747 2089
f6790af6 2090 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
2091}
2092
2093MemoryRegion *get_system_memory(void)
2094{
2095 return system_memory;
2096}
2097
309cb471
AK
2098MemoryRegion *get_system_io(void)
2099{
2100 return system_io;
2101}
2102
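/* Illustrative board-level sketch (the device name, state and ops are
 * assumptions, not QEMU code): map a 4 KiB MMIO region into the system
 * address space that memory_map_init() created above. */
static MemoryRegion example_mmio;
static const MemoryRegionOps example_ops;   /* read/write callbacks supplied
                                               by the device model */
static void *example_state;

static void example_map_device(void)
{
    memory_region_init_io(&example_mmio, NULL, &example_ops, example_state,
                          "example-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000,
                                &example_mmio);
}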
e2eef170
PB
2103#endif /* !defined(CONFIG_USER_ONLY) */
2104
13eb76e0
FB
2105/* physical memory access (slow version, mainly for debug) */
2106#if defined(CONFIG_USER_ONLY)
f17ec444 2107int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 2108 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2109{
2110 int l, flags;
2111 target_ulong page;
53a5960a 2112 void * p;
13eb76e0
FB
2113
2114 while (len > 0) {
2115 page = addr & TARGET_PAGE_MASK;
2116 l = (page + TARGET_PAGE_SIZE) - addr;
2117 if (l > len)
2118 l = len;
2119 flags = page_get_flags(page);
2120 if (!(flags & PAGE_VALID))
a68fe89c 2121 return -1;
13eb76e0
FB
2122 if (is_write) {
2123 if (!(flags & PAGE_WRITE))
a68fe89c 2124 return -1;
579a97f7 2125 /* XXX: this code should not depend on lock_user */
72fb7daa 2126 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 2127 return -1;
72fb7daa
AJ
2128 memcpy(p, buf, l);
2129 unlock_user(p, addr, l);
13eb76e0
FB
2130 } else {
2131 if (!(flags & PAGE_READ))
a68fe89c 2132 return -1;
579a97f7 2133 /* XXX: this code should not depend on lock_user */
72fb7daa 2134 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 2135 return -1;
72fb7daa 2136 memcpy(buf, p, l);
5b257578 2137 unlock_user(p, addr, 0);
13eb76e0
FB
2138 }
2139 len -= l;
2140 buf += l;
2141 addr += l;
2142 }
a68fe89c 2143 return 0;
13eb76e0 2144}
8df1cd07 2145
13eb76e0 2146#else
51d7a9eb 2147
a8170e5e
AK
2148static void invalidate_and_set_dirty(hwaddr addr,
2149 hwaddr length)
51d7a9eb 2150{
f874bf90
PM
2151 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2152 tb_invalidate_phys_range(addr, addr + length, 0);
6886867e 2153 cpu_physical_memory_set_dirty_range_nocode(addr, length);
51d7a9eb 2154 }
e226939d 2155 xen_modified_memory(addr, length);
51d7a9eb
AP
2156}
2157
23326164 2158static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 2159{
e1622f4b 2160 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
2161
2162 /* Regions are assumed to support 1-4 byte accesses unless
2163 otherwise specified. */
23326164
RH
2164 if (access_size_max == 0) {
2165 access_size_max = 4;
2166 }
2167
2168 /* Bound the maximum access by the alignment of the address. */
2169 if (!mr->ops->impl.unaligned) {
2170 unsigned align_size_max = addr & -addr;
2171 if (align_size_max != 0 && align_size_max < access_size_max) {
2172 access_size_max = align_size_max;
2173 }
82f2563f 2174 }
23326164
RH
2175
2176 /* Don't attempt accesses larger than the maximum. */
2177 if (l > access_size_max) {
2178 l = access_size_max;
82f2563f 2179 }
098178f2
PB
2180 if (l & (l - 1)) {
2181 l = 1 << (qemu_fls(l) - 1);
2182 }
23326164
RH
2183
2184 return l;
82f2563f
PB
2185}
2186
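/* Worked example (illustrative values only): an 8-byte access at
 * guest-physical address 0x1006 hits an MMIO region whose
 * valid.max_access_size is 4 and which does not allow unaligned access.
 * The lowest set bit of the address bounds the access: 0x1006 & -0x1006 == 2,
 * so the request is issued as a 2-byte access and address_space_rw() below
 * loops over the remainder. */
static unsigned example_access_size(void)
{
    hwaddr addr = 0x1006;
    unsigned l = 8, max = 4;
    unsigned align = addr & -addr;      /* == 2: largest natural alignment */

    if (align != 0 && align < max) {
        max = align;
    }
    return l > max ? max : l;           /* == 2 */
}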
fd8aaa76 2187bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2188 int len, bool is_write)
13eb76e0 2189{
149f54b5 2190 hwaddr l;
13eb76e0 2191 uint8_t *ptr;
791af8c8 2192 uint64_t val;
149f54b5 2193 hwaddr addr1;
5c8a00ce 2194 MemoryRegion *mr;
fd8aaa76 2195 bool error = false;
3b46e624 2196
13eb76e0 2197 while (len > 0) {
149f54b5 2198 l = len;
5c8a00ce 2199 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2200
13eb76e0 2201 if (is_write) {
5c8a00ce
PB
2202 if (!memory_access_is_direct(mr, is_write)) {
2203 l = memory_access_size(mr, l, addr1);
4917cf44 2204 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2205 potential bugs */
23326164
RH
2206 switch (l) {
2207 case 8:
2208 /* 64 bit write access */
2209 val = ldq_p(buf);
2210 error |= io_mem_write(mr, addr1, val, 8);
2211 break;
2212 case 4:
1c213d19 2213 /* 32 bit write access */
c27004ec 2214 val = ldl_p(buf);
5c8a00ce 2215 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2216 break;
2217 case 2:
1c213d19 2218 /* 16 bit write access */
c27004ec 2219 val = lduw_p(buf);
5c8a00ce 2220 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2221 break;
2222 case 1:
1c213d19 2223 /* 8 bit write access */
c27004ec 2224 val = ldub_p(buf);
5c8a00ce 2225 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2226 break;
2227 default:
2228 abort();
13eb76e0 2229 }
2bbfa05d 2230 } else {
5c8a00ce 2231 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2232 /* RAM case */
5579c7f3 2233 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2234 memcpy(ptr, buf, l);
51d7a9eb 2235 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2236 }
2237 } else {
5c8a00ce 2238 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2239 /* I/O case */
5c8a00ce 2240 l = memory_access_size(mr, l, addr1);
23326164
RH
2241 switch (l) {
2242 case 8:
2243 /* 64 bit read access */
2244 error |= io_mem_read(mr, addr1, &val, 8);
2245 stq_p(buf, val);
2246 break;
2247 case 4:
13eb76e0 2248 /* 32 bit read access */
5c8a00ce 2249 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2250 stl_p(buf, val);
23326164
RH
2251 break;
2252 case 2:
13eb76e0 2253 /* 16 bit read access */
5c8a00ce 2254 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2255 stw_p(buf, val);
23326164
RH
2256 break;
2257 case 1:
1c213d19 2258 /* 8 bit read access */
5c8a00ce 2259 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2260 stb_p(buf, val);
23326164
RH
2261 break;
2262 default:
2263 abort();
13eb76e0
FB
2264 }
2265 } else {
2266 /* RAM case */
5c8a00ce 2267 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2268 memcpy(buf, ptr, l);
13eb76e0
FB
2269 }
2270 }
2271 len -= l;
2272 buf += l;
2273 addr += l;
2274 }
fd8aaa76
PB
2275
2276 return error;
13eb76e0 2277}
8df1cd07 2278
fd8aaa76 2279bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2280 const uint8_t *buf, int len)
2281{
fd8aaa76 2282 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2283}
2284
fd8aaa76 2285bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2286{
fd8aaa76 2287 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2288}
2289
2290
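/* Usage sketch (illustrative, not part of exec.c): a device model doing DMA
 * through its AddressSpace with the helpers above.  The bool result is the
 * accumulated I/O error flag from address_space_rw(); the descriptor layout
 * and addresses are assumptions. */
static void example_dma(AddressSpace *as, hwaddr desc_pa)
{
    uint8_t desc[16];

    if (address_space_read(as, desc_pa, desc, sizeof(desc))) {
        /* at least one slice hit a region that rejected the access */
        return;
    }
    desc[0] |= 0x80;                              /* mark descriptor done */
    address_space_write(as, desc_pa, desc, sizeof(desc));
}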
a8170e5e 2291void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2292 int len, int is_write)
2293{
fd8aaa76 2294 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2295}
2296
582b55a9
AG
2297enum write_rom_type {
2298 WRITE_DATA,
2299 FLUSH_CACHE,
2300};
2301
2a221651 2302static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2303 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2304{
149f54b5 2305 hwaddr l;
d0ecd2aa 2306 uint8_t *ptr;
149f54b5 2307 hwaddr addr1;
5c8a00ce 2308 MemoryRegion *mr;
3b46e624 2309
d0ecd2aa 2310 while (len > 0) {
149f54b5 2311 l = len;
2a221651 2312 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2313
5c8a00ce
PB
2314 if (!(memory_region_is_ram(mr) ||
2315 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2316 /* do nothing */
2317 } else {
5c8a00ce 2318 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2319 /* ROM/RAM case */
5579c7f3 2320 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2321 switch (type) {
2322 case WRITE_DATA:
2323 memcpy(ptr, buf, l);
2324 invalidate_and_set_dirty(addr1, l);
2325 break;
2326 case FLUSH_CACHE:
2327 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2328 break;
2329 }
d0ecd2aa
FB
2330 }
2331 len -= l;
2332 buf += l;
2333 addr += l;
2334 }
2335}
2336
582b55a9 2337/* used for ROM loading: can write in RAM and ROM */
2a221651 2338void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2339 const uint8_t *buf, int len)
2340{
2a221651 2341 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2342}
2343
2344void cpu_flush_icache_range(hwaddr start, int len)
2345{
2346 /*
2347 * This function should do the same thing as an icache flush that was
2348 * triggered from within the guest. For TCG we are always cache coherent,
2349 * so there is no need to flush anything. For KVM / Xen we need to flush
2350 * the host's instruction cache at least.
2351 */
2352 if (tcg_enabled()) {
2353 return;
2354 }
2355
2a221651
EI
2356 cpu_physical_memory_write_rom_internal(&address_space_memory,
2357 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2358}
2359
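/* Usage sketch (illustrative): loading a firmware blob the way a board or
 * ROM loader might, then making the host instruction cache coherent for the
 * KVM/Xen case described in the comment above.  blob, blob_len and code_gpa
 * are assumptions. */
static void example_load_firmware(const uint8_t *blob, int blob_len,
                                  hwaddr code_gpa)
{
    cpu_physical_memory_write_rom(&address_space_memory, code_gpa,
                                  blob, blob_len);
    cpu_flush_icache_range(code_gpa, blob_len);
}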
6d16c2f8 2360typedef struct {
d3e71559 2361 MemoryRegion *mr;
6d16c2f8 2362 void *buffer;
a8170e5e
AK
2363 hwaddr addr;
2364 hwaddr len;
6d16c2f8
AL
2365} BounceBuffer;
2366
2367static BounceBuffer bounce;
2368
ba223c29
AL
2369typedef struct MapClient {
2370 void *opaque;
2371 void (*callback)(void *opaque);
72cf2d4f 2372 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2373} MapClient;
2374
72cf2d4f
BS
2375static QLIST_HEAD(map_client_list, MapClient) map_client_list
2376 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2377
2378void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2379{
7267c094 2380 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2381
2382 client->opaque = opaque;
2383 client->callback = callback;
72cf2d4f 2384 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2385 return client;
2386}
2387
8b9c99d9 2388static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2389{
2390 MapClient *client = (MapClient *)_client;
2391
72cf2d4f 2392 QLIST_REMOVE(client, link);
7267c094 2393 g_free(client);
ba223c29
AL
2394}
2395
2396static void cpu_notify_map_clients(void)
2397{
2398 MapClient *client;
2399
72cf2d4f
BS
2400 while (!QLIST_EMPTY(&map_client_list)) {
2401 client = QLIST_FIRST(&map_client_list);
ba223c29 2402 client->callback(client->opaque);
34d5e948 2403 cpu_unregister_map_client(client);
ba223c29
AL
2404 }
2405}
2406
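/* Usage sketch (illustrative): a caller whose address_space_map() attempt
 * failed because the single bounce buffer was busy registers a callback;
 * cpu_notify_map_clients() invokes it once address_space_unmap() releases
 * the buffer.  The retry function is an assumption. */
static void example_retry_map(void *opaque)
{
    /* re-issue the failed address_space_map() attempt here */
}

static void example_wait_for_bounce(void)
{
    cpu_register_map_client(NULL, example_retry_map);
}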
51644ab7
PB
2407bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2408{
5c8a00ce 2409 MemoryRegion *mr;
51644ab7
PB
2410 hwaddr l, xlat;
2411
2412 while (len > 0) {
2413 l = len;
5c8a00ce
PB
2414 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2415 if (!memory_access_is_direct(mr, is_write)) {
2416 l = memory_access_size(mr, l, addr);
2417 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2418 return false;
2419 }
2420 }
2421
2422 len -= l;
2423 addr += l;
2424 }
2425 return true;
2426}
2427
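/* Usage sketch (illustrative): probe a guest buffer before committing to a
 * transfer, so a request can be failed cleanly instead of writing into an
 * unassigned or I/O-only region.  gpa, data and len are assumptions. */
static bool example_try_write(AddressSpace *as, hwaddr gpa,
                              const uint8_t *data, int len)
{
    if (!address_space_access_valid(as, gpa, len, true)) {
        return false;
    }
    /* address_space_write() returns the error flag, so invert it */
    return !address_space_write(as, gpa, data, len);
}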
6d16c2f8
AL
2428/* Map a physical memory region into a host virtual address.
2429 * May map a subset of the requested range, given by and returned in *plen.
2430 * May return NULL if resources needed to perform the mapping are exhausted.
2431 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2432 * Use cpu_register_map_client() to know when retrying the map operation is
2433 * likely to succeed.
6d16c2f8 2434 */
ac1970fb 2435void *address_space_map(AddressSpace *as,
a8170e5e
AK
2436 hwaddr addr,
2437 hwaddr *plen,
ac1970fb 2438 bool is_write)
6d16c2f8 2439{
a8170e5e 2440 hwaddr len = *plen;
e3127ae0
PB
2441 hwaddr done = 0;
2442 hwaddr l, xlat, base;
2443 MemoryRegion *mr, *this_mr;
2444 ram_addr_t raddr;
6d16c2f8 2445
e3127ae0
PB
2446 if (len == 0) {
2447 return NULL;
2448 }
38bee5dc 2449
e3127ae0
PB
2450 l = len;
2451 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2452 if (!memory_access_is_direct(mr, is_write)) {
2453 if (bounce.buffer) {
2454 return NULL;
6d16c2f8 2455 }
e85d9db5
KW
2456 /* Avoid unbounded allocations */
2457 l = MIN(l, TARGET_PAGE_SIZE);
2458 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2459 bounce.addr = addr;
2460 bounce.len = l;
d3e71559
PB
2461
2462 memory_region_ref(mr);
2463 bounce.mr = mr;
e3127ae0
PB
2464 if (!is_write) {
2465 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2466 }
6d16c2f8 2467
e3127ae0
PB
2468 *plen = l;
2469 return bounce.buffer;
2470 }
2471
2472 base = xlat;
2473 raddr = memory_region_get_ram_addr(mr);
2474
2475 for (;;) {
6d16c2f8
AL
2476 len -= l;
2477 addr += l;
e3127ae0
PB
2478 done += l;
2479 if (len == 0) {
2480 break;
2481 }
2482
2483 l = len;
2484 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2485 if (this_mr != mr || xlat != base + done) {
2486 break;
2487 }
6d16c2f8 2488 }
e3127ae0 2489
d3e71559 2490 memory_region_ref(mr);
e3127ae0
PB
2491 *plen = done;
2492 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2493}
2494
ac1970fb 2495/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2496 * Will also mark the memory as dirty if is_write == 1. access_len gives
2497 * the amount of memory that was actually read or written by the caller.
2498 */
a8170e5e
AK
2499void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2500 int is_write, hwaddr access_len)
6d16c2f8
AL
2501{
2502 if (buffer != bounce.buffer) {
d3e71559
PB
2503 MemoryRegion *mr;
2504 ram_addr_t addr1;
2505
2506 mr = qemu_ram_addr_from_host(buffer, &addr1);
2507 assert(mr != NULL);
6d16c2f8 2508 if (is_write) {
6886867e 2509 invalidate_and_set_dirty(addr1, access_len);
6d16c2f8 2510 }
868bb33f 2511 if (xen_enabled()) {
e41d7c69 2512 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2513 }
d3e71559 2514 memory_region_unref(mr);
6d16c2f8
AL
2515 return;
2516 }
2517 if (is_write) {
ac1970fb 2518 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2519 }
f8a83245 2520 qemu_vfree(bounce.buffer);
6d16c2f8 2521 bounce.buffer = NULL;
d3e71559 2522 memory_region_unref(bounce.mr);
ba223c29 2523 cpu_notify_map_clients();
6d16c2f8 2524}
d0ecd2aa 2525
a8170e5e
AK
2526void *cpu_physical_memory_map(hwaddr addr,
2527 hwaddr *plen,
ac1970fb
AK
2528 int is_write)
2529{
2530 return address_space_map(&address_space_memory, addr, plen, is_write);
2531}
2532
a8170e5e
AK
2533void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2534 int is_write, hwaddr access_len)
ac1970fb
AK
2535{
2536 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2537}
2538
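/* Usage sketch (illustrative): zero-copy access to guest RAM via the
 * map/unmap pair above.  If the region is not direct RAM the call falls back
 * to the bounce buffer and may return NULL or shrink *plen, so both must be
 * checked.  gpa is an assumption. */
static void example_fill_guest_page(hwaddr gpa)
{
    hwaddr plen = TARGET_PAGE_SIZE;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host) {
        return;             /* retry later via cpu_register_map_client() */
    }
    memset(host, 0, plen);  /* plen may be smaller than requested */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}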
8df1cd07 2539/* warning: addr must be aligned */
fdfba1a2 2540static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2541 enum device_endian endian)
8df1cd07 2542{
8df1cd07 2543 uint8_t *ptr;
791af8c8 2544 uint64_t val;
5c8a00ce 2545 MemoryRegion *mr;
149f54b5
PB
2546 hwaddr l = 4;
2547 hwaddr addr1;
8df1cd07 2548
fdfba1a2 2549 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2550 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2551 /* I/O case */
5c8a00ce 2552 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2553#if defined(TARGET_WORDS_BIGENDIAN)
2554 if (endian == DEVICE_LITTLE_ENDIAN) {
2555 val = bswap32(val);
2556 }
2557#else
2558 if (endian == DEVICE_BIG_ENDIAN) {
2559 val = bswap32(val);
2560 }
2561#endif
8df1cd07
FB
2562 } else {
2563 /* RAM case */
5c8a00ce 2564 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2565 & TARGET_PAGE_MASK)
149f54b5 2566 + addr1);
1e78bcc1
AG
2567 switch (endian) {
2568 case DEVICE_LITTLE_ENDIAN:
2569 val = ldl_le_p(ptr);
2570 break;
2571 case DEVICE_BIG_ENDIAN:
2572 val = ldl_be_p(ptr);
2573 break;
2574 default:
2575 val = ldl_p(ptr);
2576 break;
2577 }
8df1cd07
FB
2578 }
2579 return val;
2580}
2581
fdfba1a2 2582uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2583{
fdfba1a2 2584 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2585}
2586
fdfba1a2 2587uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2588{
fdfba1a2 2589 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2590}
2591
fdfba1a2 2592uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2593{
fdfba1a2 2594 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2595}
2596
84b7b8e7 2597/* warning: addr must be aligned */
2c17449b 2598static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2599 enum device_endian endian)
84b7b8e7 2600{
84b7b8e7
FB
2601 uint8_t *ptr;
2602 uint64_t val;
5c8a00ce 2603 MemoryRegion *mr;
149f54b5
PB
2604 hwaddr l = 8;
2605 hwaddr addr1;
84b7b8e7 2606
2c17449b 2607 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2608 false);
2609 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2610 /* I/O case */
5c8a00ce 2611 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2612#if defined(TARGET_WORDS_BIGENDIAN)
2613 if (endian == DEVICE_LITTLE_ENDIAN) {
2614 val = bswap64(val);
2615 }
2616#else
2617 if (endian == DEVICE_BIG_ENDIAN) {
2618 val = bswap64(val);
2619 }
84b7b8e7
FB
2620#endif
2621 } else {
2622 /* RAM case */
5c8a00ce 2623 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2624 & TARGET_PAGE_MASK)
149f54b5 2625 + addr1);
1e78bcc1
AG
2626 switch (endian) {
2627 case DEVICE_LITTLE_ENDIAN:
2628 val = ldq_le_p(ptr);
2629 break;
2630 case DEVICE_BIG_ENDIAN:
2631 val = ldq_be_p(ptr);
2632 break;
2633 default:
2634 val = ldq_p(ptr);
2635 break;
2636 }
84b7b8e7
FB
2637 }
2638 return val;
2639}
2640
2c17449b 2641uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2642{
2c17449b 2643 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2644}
2645
2c17449b 2646uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2647{
2c17449b 2648 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2649}
2650
2c17449b 2651uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2652{
2c17449b 2653 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2654}
2655
aab33094 2656/* XXX: optimize */
2c17449b 2657uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2658{
2659 uint8_t val;
2c17449b 2660 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2661 return val;
2662}
2663
733f0b02 2664/* warning: addr must be aligned */
41701aa4 2665static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2666 enum device_endian endian)
aab33094 2667{
733f0b02
MT
2668 uint8_t *ptr;
2669 uint64_t val;
5c8a00ce 2670 MemoryRegion *mr;
149f54b5
PB
2671 hwaddr l = 2;
2672 hwaddr addr1;
733f0b02 2673
41701aa4 2674 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2675 false);
2676 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2677 /* I/O case */
5c8a00ce 2678 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2679#if defined(TARGET_WORDS_BIGENDIAN)
2680 if (endian == DEVICE_LITTLE_ENDIAN) {
2681 val = bswap16(val);
2682 }
2683#else
2684 if (endian == DEVICE_BIG_ENDIAN) {
2685 val = bswap16(val);
2686 }
2687#endif
733f0b02
MT
2688 } else {
2689 /* RAM case */
5c8a00ce 2690 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2691 & TARGET_PAGE_MASK)
149f54b5 2692 + addr1);
1e78bcc1
AG
2693 switch (endian) {
2694 case DEVICE_LITTLE_ENDIAN:
2695 val = lduw_le_p(ptr);
2696 break;
2697 case DEVICE_BIG_ENDIAN:
2698 val = lduw_be_p(ptr);
2699 break;
2700 default:
2701 val = lduw_p(ptr);
2702 break;
2703 }
733f0b02
MT
2704 }
2705 return val;
aab33094
FB
2706}
2707
41701aa4 2708uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2709{
41701aa4 2710 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2711}
2712
41701aa4 2713uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2714{
41701aa4 2715 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2716}
2717
41701aa4 2718uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2719{
41701aa4 2720 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2721}
2722
8df1cd07
FB
 2723/* warning: addr must be aligned. The ram page is not marked as dirty
2724 and the code inside is not invalidated. It is useful if the dirty
2725 bits are used to track modified PTEs */
2198a121 2726void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2727{
8df1cd07 2728 uint8_t *ptr;
5c8a00ce 2729 MemoryRegion *mr;
149f54b5
PB
2730 hwaddr l = 4;
2731 hwaddr addr1;
8df1cd07 2732
2198a121 2733 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2734 true);
2735 if (l < 4 || !memory_access_is_direct(mr, true)) {
2736 io_mem_write(mr, addr1, val, 4);
8df1cd07 2737 } else {
5c8a00ce 2738 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2739 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2740 stl_p(ptr, val);
74576198
AL
2741
2742 if (unlikely(in_migration)) {
a2cd8c85 2743 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2744 /* invalidate code */
2745 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2746 /* set dirty bit */
6886867e 2747 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
74576198
AL
2748 }
2749 }
8df1cd07
FB
2750 }
2751}
2752
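/* Illustrative sketch of the "track modified PTEs" use mentioned above,
 * modelled on what target MMU code does while walking guest page tables
 * (the accessed-bit value and pte_addr are assumptions borrowed from the
 * x86 target): update the PTE in guest RAM without marking the page dirty
 * or invalidating any translated code. */
static void example_set_accessed(CPUState *cs, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(cs->as, pte_addr);

    if (!(pte & (1 << 5) /* PG_ACCESSED_MASK on x86 */)) {
        stl_phys_notdirty(cs->as, pte_addr, pte | (1 << 5));
    }
}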
2753/* warning: addr must be aligned */
ab1da857
EI
2754static inline void stl_phys_internal(AddressSpace *as,
2755 hwaddr addr, uint32_t val,
1e78bcc1 2756 enum device_endian endian)
8df1cd07 2757{
8df1cd07 2758 uint8_t *ptr;
5c8a00ce 2759 MemoryRegion *mr;
149f54b5
PB
2760 hwaddr l = 4;
2761 hwaddr addr1;
8df1cd07 2762
ab1da857 2763 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2764 true);
2765 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2766#if defined(TARGET_WORDS_BIGENDIAN)
2767 if (endian == DEVICE_LITTLE_ENDIAN) {
2768 val = bswap32(val);
2769 }
2770#else
2771 if (endian == DEVICE_BIG_ENDIAN) {
2772 val = bswap32(val);
2773 }
2774#endif
5c8a00ce 2775 io_mem_write(mr, addr1, val, 4);
8df1cd07 2776 } else {
8df1cd07 2777 /* RAM case */
5c8a00ce 2778 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2779 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2780 switch (endian) {
2781 case DEVICE_LITTLE_ENDIAN:
2782 stl_le_p(ptr, val);
2783 break;
2784 case DEVICE_BIG_ENDIAN:
2785 stl_be_p(ptr, val);
2786 break;
2787 default:
2788 stl_p(ptr, val);
2789 break;
2790 }
51d7a9eb 2791 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2792 }
2793}
2794
ab1da857 2795void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2796{
ab1da857 2797 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2798}
2799
ab1da857 2800void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2801{
ab1da857 2802 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2803}
2804
ab1da857 2805void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2806{
ab1da857 2807 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2808}
2809
aab33094 2810/* XXX: optimize */
db3be60d 2811void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2812{
2813 uint8_t v = val;
db3be60d 2814 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2815}
2816
733f0b02 2817/* warning: addr must be aligned */
5ce5944d
EI
2818static inline void stw_phys_internal(AddressSpace *as,
2819 hwaddr addr, uint32_t val,
1e78bcc1 2820 enum device_endian endian)
aab33094 2821{
733f0b02 2822 uint8_t *ptr;
5c8a00ce 2823 MemoryRegion *mr;
149f54b5
PB
2824 hwaddr l = 2;
2825 hwaddr addr1;
733f0b02 2826
5ce5944d 2827 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2828 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2829#if defined(TARGET_WORDS_BIGENDIAN)
2830 if (endian == DEVICE_LITTLE_ENDIAN) {
2831 val = bswap16(val);
2832 }
2833#else
2834 if (endian == DEVICE_BIG_ENDIAN) {
2835 val = bswap16(val);
2836 }
2837#endif
5c8a00ce 2838 io_mem_write(mr, addr1, val, 2);
733f0b02 2839 } else {
733f0b02 2840 /* RAM case */
5c8a00ce 2841 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2842 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2843 switch (endian) {
2844 case DEVICE_LITTLE_ENDIAN:
2845 stw_le_p(ptr, val);
2846 break;
2847 case DEVICE_BIG_ENDIAN:
2848 stw_be_p(ptr, val);
2849 break;
2850 default:
2851 stw_p(ptr, val);
2852 break;
2853 }
51d7a9eb 2854 invalidate_and_set_dirty(addr1, 2);
733f0b02 2855 }
aab33094
FB
2856}
2857
5ce5944d 2858void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2859{
5ce5944d 2860 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2861}
2862
5ce5944d 2863void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2864{
5ce5944d 2865 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2866}
2867
5ce5944d 2868void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2869{
5ce5944d 2870 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2871}
2872
aab33094 2873/* XXX: optimize */
f606604f 2874void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2875{
2876 val = tswap64(val);
f606604f 2877 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2878}
2879
f606604f 2880void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2881{
2882 val = cpu_to_le64(val);
f606604f 2883 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2884}
2885
f606604f 2886void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2887{
2888 val = cpu_to_be64(val);
f606604f 2889 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2890}
2891
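/* Usage sketch (illustrative): device code reading and writing fixed-endian
 * fields in guest memory with the helpers above, instead of open-coding byte
 * swaps around address_space_rw().  The ring layout is an assumption. */
static void example_ring_update(AddressSpace *as, hwaddr ring_pa)
{
    uint32_t head = ldl_le_phys(as, ring_pa);        /* little-endian field */
    uint64_t stamp = ldq_be_phys(as, ring_pa + 8);   /* big-endian field */

    stl_le_phys(as, ring_pa, head + 1);
    stq_be_phys(as, ring_pa + 8, stamp);
}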
5e2972fd 2892/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2893int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2894 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2895{
2896 int l;
a8170e5e 2897 hwaddr phys_addr;
9b3c35e0 2898 target_ulong page;
13eb76e0
FB
2899
2900 while (len > 0) {
2901 page = addr & TARGET_PAGE_MASK;
f17ec444 2902 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2903 /* if no physical page mapped, return an error */
2904 if (phys_addr == -1)
2905 return -1;
2906 l = (page + TARGET_PAGE_SIZE) - addr;
2907 if (l > len)
2908 l = len;
5e2972fd 2909 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2910 if (is_write) {
2911 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2912 } else {
2913 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2914 }
13eb76e0
FB
2915 len -= l;
2916 buf += l;
2917 addr += l;
2918 }
2919 return 0;
2920}
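/* Usage sketch (illustrative): how a gdbstub-style caller reads guest
 * *virtual* memory.  The address is translated page by page with
 * cpu_get_phys_page_debug() above, and writes go through the ROM path so
 * breakpoints can be planted even in write-protected pages.  pc is an
 * assumption. */
static int example_fetch_insn(CPUState *cpu, target_ulong pc,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, pc, buf, len, 0 /* read */);
}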
a68fe89c 2921#endif
13eb76e0 2922
8e4a424b
BS
2923/*
2924 * A helper function for the _utterly broken_ virtio device model to find out if
2925 * it's running on a big endian machine. Don't do this at home kids!
2926 */
98ed8ecf
GK
2927bool target_words_bigendian(void);
2928bool target_words_bigendian(void)
8e4a424b
BS
2929{
2930#if defined(TARGET_WORDS_BIGENDIAN)
2931 return true;
2932#else
2933 return false;
2934#endif
2935}
2936
76f35538 2937#ifndef CONFIG_USER_ONLY
a8170e5e 2938bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2939{
5c8a00ce 2940 MemoryRegion *mr;
149f54b5 2941 hwaddr l = 1;
76f35538 2942
5c8a00ce
PB
2943 mr = address_space_translate(&address_space_memory,
2944 phys_addr, &phys_addr, &l, false);
76f35538 2945
5c8a00ce
PB
2946 return !(memory_region_is_ram(mr) ||
2947 memory_region_is_romd(mr));
76f35538 2948}
bd2fa51f
MH
2949
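/* Usage sketch (illustrative): skip MMIO when scanning guest-physical
 * addresses, e.g. while producing a memory dump, so device read side effects
 * are not triggered.  gpa, buf and len are assumptions. */
static void example_dump_page(hwaddr gpa, uint8_t *buf, int len)
{
    if (cpu_physical_memory_is_io(gpa)) {
        memset(buf, 0, len);    /* don't touch the device */
        return;
    }
    cpu_physical_memory_rw(gpa, buf, len, 0);
}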
2950void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2951{
2952 RAMBlock *block;
2953
2954 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
9b8424d5 2955 func(block->host, block->offset, block->used_length, opaque);
bd2fa51f
MH
2956 }
2957}
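/* Illustrative iterator callback (not QEMU code): report every RAM block.
 * It assumes the argument order this loop passes to RAMBlockIterFunc,
 * i.e. (host pointer, block offset, used length, opaque). */
static void example_print_block(void *host, ram_addr_t offset,
                                ram_addr_t length, void *opaque)
{
    fprintf(stderr, "ram block %p: offset 0x%" PRIx64 " len 0x%" PRIx64 "\n",
            host, (uint64_t)offset, (uint64_t)length);
}

/* called as: qemu_ram_foreach_block(example_print_block, NULL); */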
ec3f8c99 2958#endif