]> Git Repo - qemu.git/blame - exec.c
memory: add memory_region_is_mapped() API
[qemu.git] / exec.c
CommitLineData
54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
582b55a9 53#include "qemu/cache-utils.h"
67d95c15 54
b35ba30f
MT
55#include "qemu/range.h"
56
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
e2eef170 59#if !defined(CONFIG_USER_ONLY)
981fdf23 60static bool in_migration;
94a6b54f 61
a3161038 62RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
63
64static MemoryRegion *system_memory;
309cb471 65static MemoryRegion *system_io;
62152b8a 66
f6790af6
AK
67AddressSpace address_space_io;
68AddressSpace address_space_memory;
2673a5da 69
0844e007 70MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 71static MemoryRegion io_mem_unassigned;
0e0df1e2 72
e2eef170 73#endif
9fa3e853 74
bdc44640 75struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
76/* current CPU in the current thread. It is only valid inside
77 cpu_exec() */
4917cf44 78DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 79/* 0 = Do not count executed instructions.
bf20dc07 80 1 = Precise instruction counting.
2e70f6ef 81 2 = Adaptive rate instruction counting. */
5708fc66 82int use_icount;
6a00d601 83
e2eef170 84#if !defined(CONFIG_USER_ONLY)
4346ae3e 85
1db8abb1
PB
86typedef struct PhysPageEntry PhysPageEntry;
87
88struct PhysPageEntry {
9736e55b 89 /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 90 uint32_t skip : 6;
9736e55b 91 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 92 uint32_t ptr : 26;
1db8abb1
PB
93};
94
8b795765
MT
95#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
96
03f49957 97/* Size of the L2 (and L3, etc) page tables. */
57271d63 98#define ADDR_SPACE_BITS 64
03f49957 99
026736ce 100#define P_L2_BITS 9
03f49957
PB
101#define P_L2_SIZE (1 << P_L2_BITS)
102
103#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
104
105typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 106
53cb28cb
MA
107typedef struct PhysPageMap {
108 unsigned sections_nb;
109 unsigned sections_nb_alloc;
110 unsigned nodes_nb;
111 unsigned nodes_nb_alloc;
112 Node *nodes;
113 MemoryRegionSection *sections;
114} PhysPageMap;
115
1db8abb1
PB
116struct AddressSpaceDispatch {
117 /* This is a multi-level map on the physical address space.
118 * The bottom level has pointers to MemoryRegionSections.
119 */
120 PhysPageEntry phys_map;
53cb28cb 121 PhysPageMap map;
acc9d80b 122 AddressSpace *as;
1db8abb1
PB
123};
124
90260c6c
JK
125#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
126typedef struct subpage_t {
127 MemoryRegion iomem;
acc9d80b 128 AddressSpace *as;
90260c6c
JK
129 hwaddr base;
130 uint16_t sub_section[TARGET_PAGE_SIZE];
131} subpage_t;
132
b41aac4f
LPF
133#define PHYS_SECTION_UNASSIGNED 0
134#define PHYS_SECTION_NOTDIRTY 1
135#define PHYS_SECTION_ROM 2
136#define PHYS_SECTION_WATCH 3
5312bd8b 137
e2eef170 138static void io_mem_init(void);
62152b8a 139static void memory_map_init(void);
09daed84 140static void tcg_commit(MemoryListener *listener);
e2eef170 141
1ec9b909 142static MemoryRegion io_mem_watch;
6658ffb8 143#endif
fd6ce8f6 144
6d9a1304 145#if !defined(CONFIG_USER_ONLY)
d6f2ea22 146
53cb28cb 147static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 148{
53cb28cb
MA
149 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
150 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
151 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
152 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 153 }
f7bf5461
AK
154}
155
53cb28cb 156static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
157{
158 unsigned i;
8b795765 159 uint32_t ret;
f7bf5461 160
53cb28cb 161 ret = map->nodes_nb++;
f7bf5461 162 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 163 assert(ret != map->nodes_nb_alloc);
03f49957 164 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
165 map->nodes[ret][i].skip = 1;
166 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 167 }
f7bf5461 168 return ret;
d6f2ea22
AK
169}
170
53cb28cb
MA
171static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
172 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 173 int level)
f7bf5461
AK
174{
175 PhysPageEntry *p;
176 int i;
03f49957 177 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 178
9736e55b 179 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
180 lp->ptr = phys_map_node_alloc(map);
181 p = map->nodes[lp->ptr];
f7bf5461 182 if (level == 0) {
03f49957 183 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 184 p[i].skip = 0;
b41aac4f 185 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 186 }
67c4d23c 187 }
f7bf5461 188 } else {
53cb28cb 189 p = map->nodes[lp->ptr];
92e873b9 190 }
03f49957 191 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 192
03f49957 193 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 194 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 195 lp->skip = 0;
c19e8800 196 lp->ptr = leaf;
07f07b31
AK
197 *index += step;
198 *nb -= step;
2999097b 199 } else {
53cb28cb 200 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
201 }
202 ++lp;
f7bf5461
AK
203 }
204}
205
ac1970fb 206static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 207 hwaddr index, hwaddr nb,
2999097b 208 uint16_t leaf)
f7bf5461 209{
2999097b 210 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 211 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 212
53cb28cb 213 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
214}
215
b35ba30f
MT
216/* Compact a non leaf page entry. Simply detect that the entry has a single child,
217 * and update our entry so we can skip it and go directly to the destination.
218 */
219static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
220{
221 unsigned valid_ptr = P_L2_SIZE;
222 int valid = 0;
223 PhysPageEntry *p;
224 int i;
225
226 if (lp->ptr == PHYS_MAP_NODE_NIL) {
227 return;
228 }
229
230 p = nodes[lp->ptr];
231 for (i = 0; i < P_L2_SIZE; i++) {
232 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
233 continue;
234 }
235
236 valid_ptr = i;
237 valid++;
238 if (p[i].skip) {
239 phys_page_compact(&p[i], nodes, compacted);
240 }
241 }
242
243 /* We can only compress if there's only one child. */
244 if (valid != 1) {
245 return;
246 }
247
248 assert(valid_ptr < P_L2_SIZE);
249
250 /* Don't compress if it won't fit in the # of bits we have. */
251 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
252 return;
253 }
254
255 lp->ptr = p[valid_ptr].ptr;
256 if (!p[valid_ptr].skip) {
257 /* If our only child is a leaf, make this a leaf. */
258 /* By design, we should have made this node a leaf to begin with so we
259 * should never reach here.
260 * But since it's so simple to handle this, let's do it just in case we
261 * change this rule.
262 */
263 lp->skip = 0;
264 } else {
265 lp->skip += p[valid_ptr].skip;
266 }
267}
268
269static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
270{
271 DECLARE_BITMAP(compacted, nodes_nb);
272
273 if (d->phys_map.skip) {
53cb28cb 274 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
275 }
276}
277
97115a8d 278static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 279 Node *nodes, MemoryRegionSection *sections)
92e873b9 280{
31ab2b4a 281 PhysPageEntry *p;
97115a8d 282 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 283 int i;
f1f6e3b8 284
9736e55b 285 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 286 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 287 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 288 }
9affd6fc 289 p = nodes[lp.ptr];
03f49957 290 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 291 }
b35ba30f
MT
292
293 if (sections[lp.ptr].size.hi ||
294 range_covers_byte(sections[lp.ptr].offset_within_address_space,
295 sections[lp.ptr].size.lo, addr)) {
296 return &sections[lp.ptr];
297 } else {
298 return &sections[PHYS_SECTION_UNASSIGNED];
299 }
f3705d53
AK
300}
301
e5548617
BS
302bool memory_region_is_unassigned(MemoryRegion *mr)
303{
2a8e7499 304 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 305 && mr != &io_mem_watch;
fd6ce8f6 306}
149f54b5 307
c7086b4a 308static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
309 hwaddr addr,
310 bool resolve_subpage)
9f029603 311{
90260c6c
JK
312 MemoryRegionSection *section;
313 subpage_t *subpage;
314
53cb28cb 315 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
316 if (resolve_subpage && section->mr->subpage) {
317 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 318 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
319 }
320 return section;
9f029603
JK
321}
322
90260c6c 323static MemoryRegionSection *
c7086b4a 324address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 325 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
326{
327 MemoryRegionSection *section;
a87f3954 328 Int128 diff;
149f54b5 329
c7086b4a 330 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
331 /* Compute offset within MemoryRegionSection */
332 addr -= section->offset_within_address_space;
333
334 /* Compute offset within MemoryRegion */
335 *xlat = addr + section->offset_within_region;
336
337 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 338 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
339 return section;
340}
90260c6c 341
a87f3954
PB
342static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
343{
344 if (memory_region_is_ram(mr)) {
345 return !(is_write && mr->readonly);
346 }
347 if (memory_region_is_romd(mr)) {
348 return !is_write;
349 }
350
351 return false;
352}
353
5c8a00ce
PB
354MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
355 hwaddr *xlat, hwaddr *plen,
356 bool is_write)
90260c6c 357{
30951157
AK
358 IOMMUTLBEntry iotlb;
359 MemoryRegionSection *section;
360 MemoryRegion *mr;
361 hwaddr len = *plen;
362
363 for (;;) {
a87f3954 364 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
365 mr = section->mr;
366
367 if (!mr->iommu_ops) {
368 break;
369 }
370
371 iotlb = mr->iommu_ops->translate(mr, addr);
372 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
373 | (addr & iotlb.addr_mask));
374 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
375 if (!(iotlb.perm & (1 << is_write))) {
376 mr = &io_mem_unassigned;
377 break;
378 }
379
380 as = iotlb.target_as;
381 }
382
fe680d0d 383 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
384 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
385 len = MIN(page, len);
386 }
387
30951157
AK
388 *plen = len;
389 *xlat = addr;
390 return mr;
90260c6c
JK
391}
392
393MemoryRegionSection *
394address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
395 hwaddr *plen)
396{
30951157 397 MemoryRegionSection *section;
c7086b4a 398 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
399
400 assert(!section->mr->iommu_ops);
401 return section;
90260c6c 402}
5b6dd868 403#endif
fd6ce8f6 404
5b6dd868 405void cpu_exec_init_all(void)
fdbb84d1 406{
5b6dd868 407#if !defined(CONFIG_USER_ONLY)
b2a8658e 408 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
409 memory_map_init();
410 io_mem_init();
fdbb84d1 411#endif
5b6dd868 412}
fdbb84d1 413
b170fce3 414#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
415
416static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 417{
259186a7 418 CPUState *cpu = opaque;
a513fe19 419
5b6dd868
BS
420 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
421 version_id is increased. */
259186a7 422 cpu->interrupt_request &= ~0x01;
c01a71c1 423 tlb_flush(cpu, 1);
5b6dd868
BS
424
425 return 0;
a513fe19 426}
7501267e 427
1a1562f5 428const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
429 .name = "cpu_common",
430 .version_id = 1,
431 .minimum_version_id = 1,
5b6dd868 432 .post_load = cpu_common_post_load,
35d08458 433 .fields = (VMStateField[]) {
259186a7
AF
434 VMSTATE_UINT32(halted, CPUState),
435 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
436 VMSTATE_END_OF_LIST()
437 }
438};
1a1562f5 439
5b6dd868 440#endif
ea041c0e 441
38d8f5c8 442CPUState *qemu_get_cpu(int index)
ea041c0e 443{
bdc44640 444 CPUState *cpu;
ea041c0e 445
bdc44640 446 CPU_FOREACH(cpu) {
55e5c285 447 if (cpu->cpu_index == index) {
bdc44640 448 return cpu;
55e5c285 449 }
ea041c0e 450 }
5b6dd868 451
bdc44640 452 return NULL;
ea041c0e
FB
453}
454
09daed84
EI
455#if !defined(CONFIG_USER_ONLY)
456void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
457{
458 /* We only support one address space per cpu at the moment. */
459 assert(cpu->as == as);
460
461 if (cpu->tcg_as_listener) {
462 memory_listener_unregister(cpu->tcg_as_listener);
463 } else {
464 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
465 }
466 cpu->tcg_as_listener->commit = tcg_commit;
467 memory_listener_register(cpu->tcg_as_listener, as);
468}
469#endif
470
5b6dd868 471void cpu_exec_init(CPUArchState *env)
ea041c0e 472{
5b6dd868 473 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 474 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 475 CPUState *some_cpu;
5b6dd868
BS
476 int cpu_index;
477
478#if defined(CONFIG_USER_ONLY)
479 cpu_list_lock();
480#endif
5b6dd868 481 cpu_index = 0;
bdc44640 482 CPU_FOREACH(some_cpu) {
5b6dd868
BS
483 cpu_index++;
484 }
55e5c285 485 cpu->cpu_index = cpu_index;
1b1ed8dc 486 cpu->numa_node = 0;
f0c3c505 487 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 488 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 489#ifndef CONFIG_USER_ONLY
09daed84 490 cpu->as = &address_space_memory;
5b6dd868
BS
491 cpu->thread_id = qemu_get_thread_id();
492#endif
bdc44640 493 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
494#if defined(CONFIG_USER_ONLY)
495 cpu_list_unlock();
496#endif
e0d47944
AF
497 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
498 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
499 }
5b6dd868 500#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
501 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
502 cpu_save, cpu_load, env);
b170fce3 503 assert(cc->vmsd == NULL);
e0d47944 504 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 505#endif
b170fce3
AF
506 if (cc->vmsd != NULL) {
507 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
508 }
ea041c0e
FB
509}
510
1fddef4b 511#if defined(TARGET_HAS_ICE)
94df27fd 512#if defined(CONFIG_USER_ONLY)
00b941e5 513static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
514{
515 tb_invalidate_phys_page_range(pc, pc + 1, 0);
516}
517#else
00b941e5 518static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 519{
e8262a1b
MF
520 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
521 if (phys != -1) {
09daed84 522 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 523 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 524 }
1e7855a5 525}
c27004ec 526#endif
94df27fd 527#endif /* TARGET_HAS_ICE */
d720b93d 528
c527ee8f 529#if defined(CONFIG_USER_ONLY)
75a34036 530void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
531
532{
533}
534
75a34036 535int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
536 int flags, CPUWatchpoint **watchpoint)
537{
538 return -ENOSYS;
539}
540#else
6658ffb8 541/* Add a watchpoint. */
75a34036 542int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 543 int flags, CPUWatchpoint **watchpoint)
6658ffb8 544{
75a34036 545 vaddr len_mask = ~(len - 1);
c0ce998e 546 CPUWatchpoint *wp;
6658ffb8 547
b4051334 548 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
549 if ((len & (len - 1)) || (addr & ~len_mask) ||
550 len == 0 || len > TARGET_PAGE_SIZE) {
75a34036
AF
551 error_report("tried to set invalid watchpoint at %"
552 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
553 return -EINVAL;
554 }
7267c094 555 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
556
557 wp->vaddr = addr;
b4051334 558 wp->len_mask = len_mask;
a1d1bb31
AL
559 wp->flags = flags;
560
2dc9f411 561 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
562 if (flags & BP_GDB) {
563 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
564 } else {
565 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
566 }
6658ffb8 567
31b030d4 568 tlb_flush_page(cpu, addr);
a1d1bb31
AL
569
570 if (watchpoint)
571 *watchpoint = wp;
572 return 0;
6658ffb8
PB
573}
574
a1d1bb31 575/* Remove a specific watchpoint. */
75a34036 576int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 577 int flags)
6658ffb8 578{
75a34036 579 vaddr len_mask = ~(len - 1);
a1d1bb31 580 CPUWatchpoint *wp;
6658ffb8 581
ff4700b0 582 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334 583 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 584 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 585 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
586 return 0;
587 }
588 }
a1d1bb31 589 return -ENOENT;
6658ffb8
PB
590}
591
a1d1bb31 592/* Remove a specific watchpoint by reference. */
75a34036 593void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 594{
ff4700b0 595 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 596
31b030d4 597 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 598
7267c094 599 g_free(watchpoint);
a1d1bb31
AL
600}
601
602/* Remove all matching watchpoints. */
75a34036 603void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 604{
c0ce998e 605 CPUWatchpoint *wp, *next;
a1d1bb31 606
ff4700b0 607 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
608 if (wp->flags & mask) {
609 cpu_watchpoint_remove_by_ref(cpu, wp);
610 }
c0ce998e 611 }
7d03f82f 612}
c527ee8f 613#endif
7d03f82f 614
a1d1bb31 615/* Add a breakpoint. */
b3310ab3 616int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 617 CPUBreakpoint **breakpoint)
4c3a88a2 618{
1fddef4b 619#if defined(TARGET_HAS_ICE)
c0ce998e 620 CPUBreakpoint *bp;
3b46e624 621
7267c094 622 bp = g_malloc(sizeof(*bp));
4c3a88a2 623
a1d1bb31
AL
624 bp->pc = pc;
625 bp->flags = flags;
626
2dc9f411 627 /* keep all GDB-injected breakpoints in front */
00b941e5 628 if (flags & BP_GDB) {
f0c3c505 629 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 630 } else {
f0c3c505 631 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 632 }
3b46e624 633
f0c3c505 634 breakpoint_invalidate(cpu, pc);
a1d1bb31 635
00b941e5 636 if (breakpoint) {
a1d1bb31 637 *breakpoint = bp;
00b941e5 638 }
4c3a88a2
FB
639 return 0;
640#else
a1d1bb31 641 return -ENOSYS;
4c3a88a2
FB
642#endif
643}
644
a1d1bb31 645/* Remove a specific breakpoint. */
b3310ab3 646int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 647{
7d03f82f 648#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
649 CPUBreakpoint *bp;
650
f0c3c505 651 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 652 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 653 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
654 return 0;
655 }
7d03f82f 656 }
a1d1bb31
AL
657 return -ENOENT;
658#else
659 return -ENOSYS;
7d03f82f
EI
660#endif
661}
662
a1d1bb31 663/* Remove a specific breakpoint by reference. */
b3310ab3 664void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 665{
1fddef4b 666#if defined(TARGET_HAS_ICE)
f0c3c505
AF
667 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
668
669 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 670
7267c094 671 g_free(breakpoint);
a1d1bb31
AL
672#endif
673}
674
675/* Remove all matching breakpoints. */
b3310ab3 676void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
677{
678#if defined(TARGET_HAS_ICE)
c0ce998e 679 CPUBreakpoint *bp, *next;
a1d1bb31 680
f0c3c505 681 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
682 if (bp->flags & mask) {
683 cpu_breakpoint_remove_by_ref(cpu, bp);
684 }
c0ce998e 685 }
4c3a88a2
FB
686#endif
687}
688
c33a346e
FB
689/* enable or disable single step mode. EXCP_DEBUG is returned by the
690 CPU loop after each instruction */
3825b28f 691void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 692{
1fddef4b 693#if defined(TARGET_HAS_ICE)
ed2803da
AF
694 if (cpu->singlestep_enabled != enabled) {
695 cpu->singlestep_enabled = enabled;
696 if (kvm_enabled()) {
38e478ec 697 kvm_update_guest_debug(cpu, 0);
ed2803da 698 } else {
ccbb4d44 699 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 700 /* XXX: only flush what is necessary */
38e478ec 701 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
702 tb_flush(env);
703 }
c33a346e
FB
704 }
705#endif
706}
707
a47dddd7 708void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
709{
710 va_list ap;
493ae1f0 711 va_list ap2;
7501267e
FB
712
713 va_start(ap, fmt);
493ae1f0 714 va_copy(ap2, ap);
7501267e
FB
715 fprintf(stderr, "qemu: fatal: ");
716 vfprintf(stderr, fmt, ap);
717 fprintf(stderr, "\n");
878096ee 718 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
719 if (qemu_log_enabled()) {
720 qemu_log("qemu: fatal: ");
721 qemu_log_vprintf(fmt, ap2);
722 qemu_log("\n");
a0762859 723 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 724 qemu_log_flush();
93fcfe39 725 qemu_log_close();
924edcae 726 }
493ae1f0 727 va_end(ap2);
f9373291 728 va_end(ap);
fd052bf6
RV
729#if defined(CONFIG_USER_ONLY)
730 {
731 struct sigaction act;
732 sigfillset(&act.sa_mask);
733 act.sa_handler = SIG_DFL;
734 sigaction(SIGABRT, &act, NULL);
735 }
736#endif
7501267e
FB
737 abort();
738}
739
0124311e 740#if !defined(CONFIG_USER_ONLY)
041603fe
PB
741static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
742{
743 RAMBlock *block;
744
745 /* The list is protected by the iothread lock here. */
746 block = ram_list.mru_block;
747 if (block && addr - block->offset < block->length) {
748 goto found;
749 }
750 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
751 if (addr - block->offset < block->length) {
752 goto found;
753 }
754 }
755
756 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
757 abort();
758
759found:
760 ram_list.mru_block = block;
761 return block;
762}
763
a2f4d5be 764static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 765{
041603fe 766 ram_addr_t start1;
a2f4d5be
JQ
767 RAMBlock *block;
768 ram_addr_t end;
769
770 end = TARGET_PAGE_ALIGN(start + length);
771 start &= TARGET_PAGE_MASK;
d24981d3 772
041603fe
PB
773 block = qemu_get_ram_block(start);
774 assert(block == qemu_get_ram_block(end - 1));
775 start1 = (uintptr_t)block->host + (start - block->offset);
776 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
777}
778
5579c7f3 779/* Note: start and end must be within the same ram block. */
a2f4d5be 780void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 781 unsigned client)
1ccde1cb 782{
1ccde1cb
FB
783 if (length == 0)
784 return;
ace694cc 785 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 786
d24981d3 787 if (tcg_enabled()) {
a2f4d5be 788 tlb_reset_dirty_range_all(start, length);
5579c7f3 789 }
1ccde1cb
FB
790}
791
981fdf23 792static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
793{
794 in_migration = enable;
74576198
AL
795}
796
bb0e627a 797hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
798 MemoryRegionSection *section,
799 target_ulong vaddr,
800 hwaddr paddr, hwaddr xlat,
801 int prot,
802 target_ulong *address)
e5548617 803{
a8170e5e 804 hwaddr iotlb;
e5548617
BS
805 CPUWatchpoint *wp;
806
cc5bea60 807 if (memory_region_is_ram(section->mr)) {
e5548617
BS
808 /* Normal RAM. */
809 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 810 + xlat;
e5548617 811 if (!section->readonly) {
b41aac4f 812 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 813 } else {
b41aac4f 814 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
815 }
816 } else {
1b3fb98f 817 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 818 iotlb += xlat;
e5548617
BS
819 }
820
821 /* Make accesses to pages with watchpoints go via the
822 watchpoint trap routines. */
ff4700b0 823 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
e5548617
BS
824 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
825 /* Avoid trapping reads of pages with a write breakpoint. */
826 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 827 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
828 *address |= TLB_MMIO;
829 break;
830 }
831 }
832 }
833
834 return iotlb;
835}
9fa3e853
FB
836#endif /* defined(CONFIG_USER_ONLY) */
837
e2eef170 838#if !defined(CONFIG_USER_ONLY)
8da3ff18 839
c227f099 840static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 841 uint16_t section);
acc9d80b 842static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 843
575ddeb4 844static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
845
846/*
847 * Set a custom physical guest memory alloator.
848 * Accelerators with unusual needs may need this. Hopefully, we can
849 * get rid of it eventually.
850 */
575ddeb4 851void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
852{
853 phys_mem_alloc = alloc;
854}
855
53cb28cb
MA
856static uint16_t phys_section_add(PhysPageMap *map,
857 MemoryRegionSection *section)
5312bd8b 858{
68f3f65b
PB
859 /* The physical section number is ORed with a page-aligned
860 * pointer to produce the iotlb entries. Thus it should
861 * never overflow into the page-aligned value.
862 */
53cb28cb 863 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 864
53cb28cb
MA
865 if (map->sections_nb == map->sections_nb_alloc) {
866 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
867 map->sections = g_renew(MemoryRegionSection, map->sections,
868 map->sections_nb_alloc);
5312bd8b 869 }
53cb28cb 870 map->sections[map->sections_nb] = *section;
dfde4e6e 871 memory_region_ref(section->mr);
53cb28cb 872 return map->sections_nb++;
5312bd8b
AK
873}
874
058bc4b5
PB
875static void phys_section_destroy(MemoryRegion *mr)
876{
dfde4e6e
PB
877 memory_region_unref(mr);
878
058bc4b5
PB
879 if (mr->subpage) {
880 subpage_t *subpage = container_of(mr, subpage_t, iomem);
881 memory_region_destroy(&subpage->iomem);
882 g_free(subpage);
883 }
884}
885
6092666e 886static void phys_sections_free(PhysPageMap *map)
5312bd8b 887{
9affd6fc
PB
888 while (map->sections_nb > 0) {
889 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
890 phys_section_destroy(section->mr);
891 }
9affd6fc
PB
892 g_free(map->sections);
893 g_free(map->nodes);
5312bd8b
AK
894}
895
ac1970fb 896static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
897{
898 subpage_t *subpage;
a8170e5e 899 hwaddr base = section->offset_within_address_space
0f0cb164 900 & TARGET_PAGE_MASK;
97115a8d 901 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 902 d->map.nodes, d->map.sections);
0f0cb164
AK
903 MemoryRegionSection subsection = {
904 .offset_within_address_space = base,
052e87b0 905 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 906 };
a8170e5e 907 hwaddr start, end;
0f0cb164 908
f3705d53 909 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 910
f3705d53 911 if (!(existing->mr->subpage)) {
acc9d80b 912 subpage = subpage_init(d->as, base);
3be91e86 913 subsection.address_space = d->as;
0f0cb164 914 subsection.mr = &subpage->iomem;
ac1970fb 915 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 916 phys_section_add(&d->map, &subsection));
0f0cb164 917 } else {
f3705d53 918 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
919 }
920 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 921 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
922 subpage_register(subpage, start, end,
923 phys_section_add(&d->map, section));
0f0cb164
AK
924}
925
926
052e87b0
PB
927static void register_multipage(AddressSpaceDispatch *d,
928 MemoryRegionSection *section)
33417e70 929{
a8170e5e 930 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 931 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
932 uint64_t num_pages = int128_get64(int128_rshift(section->size,
933 TARGET_PAGE_BITS));
dd81124b 934
733d5ef5
PB
935 assert(num_pages);
936 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
937}
938
ac1970fb 939static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 940{
89ae337a 941 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 942 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 943 MemoryRegionSection now = *section, remain = *section;
052e87b0 944 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 945
733d5ef5
PB
946 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
947 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
948 - now.offset_within_address_space;
949
052e87b0 950 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 951 register_subpage(d, &now);
733d5ef5 952 } else {
052e87b0 953 now.size = int128_zero();
733d5ef5 954 }
052e87b0
PB
955 while (int128_ne(remain.size, now.size)) {
956 remain.size = int128_sub(remain.size, now.size);
957 remain.offset_within_address_space += int128_get64(now.size);
958 remain.offset_within_region += int128_get64(now.size);
69b67646 959 now = remain;
052e87b0 960 if (int128_lt(remain.size, page_size)) {
733d5ef5 961 register_subpage(d, &now);
88266249 962 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 963 now.size = page_size;
ac1970fb 964 register_subpage(d, &now);
69b67646 965 } else {
052e87b0 966 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 967 register_multipage(d, &now);
69b67646 968 }
0f0cb164
AK
969 }
970}
971
62a2744c
SY
972void qemu_flush_coalesced_mmio_buffer(void)
973{
974 if (kvm_enabled())
975 kvm_flush_coalesced_mmio_buffer();
976}
977
b2a8658e
UD
978void qemu_mutex_lock_ramlist(void)
979{
980 qemu_mutex_lock(&ram_list.mutex);
981}
982
983void qemu_mutex_unlock_ramlist(void)
984{
985 qemu_mutex_unlock(&ram_list.mutex);
986}
987
e1e84ba0 988#ifdef __linux__
c902760f
MT
989
990#include <sys/vfs.h>
991
992#define HUGETLBFS_MAGIC 0x958458f6
993
994static long gethugepagesize(const char *path)
995{
996 struct statfs fs;
997 int ret;
998
999 do {
9742bf26 1000 ret = statfs(path, &fs);
c902760f
MT
1001 } while (ret != 0 && errno == EINTR);
1002
1003 if (ret != 0) {
9742bf26
YT
1004 perror(path);
1005 return 0;
c902760f
MT
1006 }
1007
1008 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1009 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1010
1011 return fs.f_bsize;
1012}
1013
ef36fa14
MT
1014static sigjmp_buf sigjump;
1015
1016static void sigbus_handler(int signal)
1017{
1018 siglongjmp(sigjump, 1);
1019}
1020
04b16653
AW
1021static void *file_ram_alloc(RAMBlock *block,
1022 ram_addr_t memory,
1023 const char *path)
c902760f
MT
1024{
1025 char *filename;
8ca761f6
PF
1026 char *sanitized_name;
1027 char *c;
c902760f
MT
1028 void *area;
1029 int fd;
c902760f
MT
1030 unsigned long hpagesize;
1031
1032 hpagesize = gethugepagesize(path);
1033 if (!hpagesize) {
f9a49dfa 1034 goto error;
c902760f
MT
1035 }
1036
1037 if (memory < hpagesize) {
1038 return NULL;
1039 }
1040
1041 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1042 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
f9a49dfa 1043 goto error;
c902760f
MT
1044 }
1045
8ca761f6
PF
1046 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1047 sanitized_name = g_strdup(block->mr->name);
1048 for (c = sanitized_name; *c != '\0'; c++) {
1049 if (*c == '/')
1050 *c = '_';
1051 }
1052
1053 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1054 sanitized_name);
1055 g_free(sanitized_name);
c902760f
MT
1056
1057 fd = mkstemp(filename);
1058 if (fd < 0) {
9742bf26 1059 perror("unable to create backing store for hugepages");
e4ada482 1060 g_free(filename);
f9a49dfa 1061 goto error;
c902760f
MT
1062 }
1063 unlink(filename);
e4ada482 1064 g_free(filename);
c902760f
MT
1065
1066 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1067
1068 /*
1069 * ftruncate is not supported by hugetlbfs in older
1070 * hosts, so don't bother bailing out on errors.
1071 * If anything goes wrong with it under other filesystems,
1072 * mmap will fail.
1073 */
1074 if (ftruncate(fd, memory))
9742bf26 1075 perror("ftruncate");
c902760f 1076
c902760f 1077 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
c902760f 1078 if (area == MAP_FAILED) {
9742bf26
YT
1079 perror("file_ram_alloc: can't mmap RAM pages");
1080 close(fd);
f9a49dfa 1081 goto error;
c902760f 1082 }
ef36fa14
MT
1083
1084 if (mem_prealloc) {
1085 int ret, i;
1086 struct sigaction act, oldact;
1087 sigset_t set, oldset;
1088
1089 memset(&act, 0, sizeof(act));
1090 act.sa_handler = &sigbus_handler;
1091 act.sa_flags = 0;
1092
1093 ret = sigaction(SIGBUS, &act, &oldact);
1094 if (ret) {
1095 perror("file_ram_alloc: failed to install signal handler");
1096 exit(1);
1097 }
1098
1099 /* unblock SIGBUS */
1100 sigemptyset(&set);
1101 sigaddset(&set, SIGBUS);
1102 pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
1103
1104 if (sigsetjmp(sigjump, 1)) {
1105 fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
1106 exit(1);
1107 }
1108
1109 /* MAP_POPULATE silently ignores failures */
2ba82852 1110 for (i = 0; i < (memory/hpagesize); i++) {
ef36fa14
MT
1111 memset(area + (hpagesize*i), 0, 1);
1112 }
1113
1114 ret = sigaction(SIGBUS, &oldact, NULL);
1115 if (ret) {
1116 perror("file_ram_alloc: failed to reinstall signal handler");
1117 exit(1);
1118 }
1119
1120 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
1121 }
1122
04b16653 1123 block->fd = fd;
c902760f 1124 return area;
f9a49dfa
MT
1125
1126error:
1127 if (mem_prealloc) {
1128 exit(1);
1129 }
1130 return NULL;
c902760f 1131}
e1e84ba0
MA
1132#else
1133static void *file_ram_alloc(RAMBlock *block,
1134 ram_addr_t memory,
1135 const char *path)
1136{
1137 fprintf(stderr, "-mem-path not supported on this host\n");
1138 exit(1);
1139}
c902760f
MT
1140#endif
1141
d17b5288 1142static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1143{
1144 RAMBlock *block, *next_block;
3e837b2c 1145 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1146
49cd9ac6
SH
1147 assert(size != 0); /* it would hand out same offset multiple times */
1148
a3161038 1149 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1150 return 0;
1151
a3161038 1152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1153 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1154
1155 end = block->offset + block->length;
1156
a3161038 1157 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1158 if (next_block->offset >= end) {
1159 next = MIN(next, next_block->offset);
1160 }
1161 }
1162 if (next - end >= size && next - end < mingap) {
3e837b2c 1163 offset = end;
04b16653
AW
1164 mingap = next - end;
1165 }
1166 }
3e837b2c
AW
1167
1168 if (offset == RAM_ADDR_MAX) {
1169 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1170 (uint64_t)size);
1171 abort();
1172 }
1173
04b16653
AW
1174 return offset;
1175}
1176
652d7ec2 1177ram_addr_t last_ram_offset(void)
d17b5288
AW
1178{
1179 RAMBlock *block;
1180 ram_addr_t last = 0;
1181
a3161038 1182 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1183 last = MAX(last, block->offset + block->length);
1184
1185 return last;
1186}
1187
ddb97f1d
JB
1188static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1189{
1190 int ret;
ddb97f1d
JB
1191
1192 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2ff3de68
MA
1193 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1194 "dump-guest-core", true)) {
ddb97f1d
JB
1195 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1196 if (ret) {
1197 perror("qemu_madvise");
1198 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1199 "but dump_guest_core=off specified\n");
1200 }
1201 }
1202}
1203
20cfe881 1204static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1205{
20cfe881 1206 RAMBlock *block;
84b89d78 1207
a3161038 1208 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1209 if (block->offset == addr) {
20cfe881 1210 return block;
c5705a77
AK
1211 }
1212 }
20cfe881
HT
1213
1214 return NULL;
1215}
1216
1217void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1218{
1219 RAMBlock *new_block = find_ram_block(addr);
1220 RAMBlock *block;
1221
c5705a77
AK
1222 assert(new_block);
1223 assert(!new_block->idstr[0]);
84b89d78 1224
09e5ab63
AL
1225 if (dev) {
1226 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1227 if (id) {
1228 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1229 g_free(id);
84b89d78
CM
1230 }
1231 }
1232 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1233
b2a8658e
UD
1234 /* This assumes the iothread lock is taken here too. */
1235 qemu_mutex_lock_ramlist();
a3161038 1236 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1237 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1238 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1239 new_block->idstr);
1240 abort();
1241 }
1242 }
b2a8658e 1243 qemu_mutex_unlock_ramlist();
c5705a77
AK
1244}
1245
20cfe881
HT
1246void qemu_ram_unset_idstr(ram_addr_t addr)
1247{
1248 RAMBlock *block = find_ram_block(addr);
1249
1250 if (block) {
1251 memset(block->idstr, 0, sizeof(block->idstr));
1252 }
1253}
1254
8490fc78
LC
1255static int memory_try_enable_merging(void *addr, size_t len)
1256{
2ff3de68 1257 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1258 /* disabled by the user */
1259 return 0;
1260 }
1261
1262 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1263}
1264
c5705a77
AK
1265ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1266 MemoryRegion *mr)
1267{
abb26d63 1268 RAMBlock *block, *new_block;
2152f5ca
JQ
1269 ram_addr_t old_ram_size, new_ram_size;
1270
1271 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77
AK
1272
1273 size = TARGET_PAGE_ALIGN(size);
1274 new_block = g_malloc0(sizeof(*new_block));
3435f395 1275 new_block->fd = -1;
84b89d78 1276
b2a8658e
UD
1277 /* This assumes the iothread lock is taken here too. */
1278 qemu_mutex_lock_ramlist();
7c637366 1279 new_block->mr = mr;
432d268c 1280 new_block->offset = find_ram_offset(size);
6977dfe6
YT
1281 if (host) {
1282 new_block->host = host;
cd19cfa2 1283 new_block->flags |= RAM_PREALLOC_MASK;
dfeaf2ab
MA
1284 } else if (xen_enabled()) {
1285 if (mem_path) {
1286 fprintf(stderr, "-mem-path not supported with Xen\n");
1287 exit(1);
1288 }
1289 xen_ram_alloc(new_block->offset, size, mr);
6977dfe6
YT
1290 } else {
1291 if (mem_path) {
e1e84ba0
MA
1292 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1293 /*
1294 * file_ram_alloc() needs to allocate just like
1295 * phys_mem_alloc, but we haven't bothered to provide
1296 * a hook there.
1297 */
1298 fprintf(stderr,
1299 "-mem-path not supported with this accelerator\n");
1300 exit(1);
1301 }
6977dfe6 1302 new_block->host = file_ram_alloc(new_block, size, mem_path);
0628c182
MA
1303 }
1304 if (!new_block->host) {
91138037 1305 new_block->host = phys_mem_alloc(size);
39228250
MA
1306 if (!new_block->host) {
1307 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1308 new_block->mr->name, strerror(errno));
1309 exit(1);
1310 }
8490fc78 1311 memory_try_enable_merging(new_block->host, size);
6977dfe6 1312 }
c902760f 1313 }
94a6b54f
PB
1314 new_block->length = size;
1315
abb26d63
PB
1316 /* Keep the list sorted from biggest to smallest block. */
1317 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1318 if (block->length < new_block->length) {
1319 break;
1320 }
1321 }
1322 if (block) {
1323 QTAILQ_INSERT_BEFORE(block, new_block, next);
1324 } else {
1325 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1326 }
0d6d3c87 1327 ram_list.mru_block = NULL;
94a6b54f 1328
f798b07f 1329 ram_list.version++;
b2a8658e 1330 qemu_mutex_unlock_ramlist();
f798b07f 1331
2152f5ca
JQ
1332 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1333
1334 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1335 int i;
1336 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1337 ram_list.dirty_memory[i] =
1338 bitmap_zero_extend(ram_list.dirty_memory[i],
1339 old_ram_size, new_ram_size);
1340 }
2152f5ca 1341 }
75218e7f 1342 cpu_physical_memory_set_dirty_range(new_block->offset, size);
94a6b54f 1343
ddb97f1d 1344 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 1345 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
3e469dbf 1346 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
ddb97f1d 1347
6f0437e8
JK
1348 if (kvm_enabled())
1349 kvm_setup_guest_memory(new_block->host, size);
1350
94a6b54f
PB
1351 return new_block->offset;
1352}
e9a1ab19 1353
c5705a77 1354ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1355{
c5705a77 1356 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1357}
1358
1f2e98b6
AW
1359void qemu_ram_free_from_ptr(ram_addr_t addr)
1360{
1361 RAMBlock *block;
1362
b2a8658e
UD
1363 /* This assumes the iothread lock is taken here too. */
1364 qemu_mutex_lock_ramlist();
a3161038 1365 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1366 if (addr == block->offset) {
a3161038 1367 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1368 ram_list.mru_block = NULL;
f798b07f 1369 ram_list.version++;
7267c094 1370 g_free(block);
b2a8658e 1371 break;
1f2e98b6
AW
1372 }
1373 }
b2a8658e 1374 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1375}
1376
c227f099 1377void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1378{
04b16653
AW
1379 RAMBlock *block;
1380
b2a8658e
UD
1381 /* This assumes the iothread lock is taken here too. */
1382 qemu_mutex_lock_ramlist();
a3161038 1383 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1384 if (addr == block->offset) {
a3161038 1385 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1386 ram_list.mru_block = NULL;
f798b07f 1387 ram_list.version++;
cd19cfa2
HY
1388 if (block->flags & RAM_PREALLOC_MASK) {
1389 ;
dfeaf2ab
MA
1390 } else if (xen_enabled()) {
1391 xen_invalidate_map_cache_entry(block->host);
089f3f76 1392#ifndef _WIN32
3435f395
MA
1393 } else if (block->fd >= 0) {
1394 munmap(block->host, block->length);
1395 close(block->fd);
089f3f76 1396#endif
04b16653 1397 } else {
dfeaf2ab 1398 qemu_anon_ram_free(block->host, block->length);
04b16653 1399 }
7267c094 1400 g_free(block);
b2a8658e 1401 break;
04b16653
AW
1402 }
1403 }
b2a8658e 1404 qemu_mutex_unlock_ramlist();
04b16653 1405
e9a1ab19
FB
1406}
1407
cd19cfa2
HY
1408#ifndef _WIN32
1409void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1410{
1411 RAMBlock *block;
1412 ram_addr_t offset;
1413 int flags;
1414 void *area, *vaddr;
1415
a3161038 1416 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1417 offset = addr - block->offset;
1418 if (offset < block->length) {
1419 vaddr = block->host + offset;
1420 if (block->flags & RAM_PREALLOC_MASK) {
1421 ;
dfeaf2ab
MA
1422 } else if (xen_enabled()) {
1423 abort();
cd19cfa2
HY
1424 } else {
1425 flags = MAP_FIXED;
1426 munmap(vaddr, length);
3435f395 1427 if (block->fd >= 0) {
cd19cfa2 1428#ifdef MAP_POPULATE
3435f395
MA
1429 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1430 MAP_PRIVATE;
fd28aa13 1431#else
3435f395 1432 flags |= MAP_PRIVATE;
cd19cfa2 1433#endif
3435f395
MA
1434 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1435 flags, block->fd, offset);
cd19cfa2 1436 } else {
2eb9fbaa
MA
1437 /*
1438 * Remap needs to match alloc. Accelerators that
1439 * set phys_mem_alloc never remap. If they did,
1440 * we'd need a remap hook here.
1441 */
1442 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1443
cd19cfa2
HY
1444 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1445 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1446 flags, -1, 0);
cd19cfa2
HY
1447 }
1448 if (area != vaddr) {
f15fbc4b
AP
1449 fprintf(stderr, "Could not remap addr: "
1450 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1451 length, addr);
1452 exit(1);
1453 }
8490fc78 1454 memory_try_enable_merging(vaddr, length);
ddb97f1d 1455 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1456 }
1457 return;
1458 }
1459 }
1460}
1461#endif /* !_WIN32 */
1462
1b5ec234
PB
1463/* Return a host pointer to ram allocated with qemu_ram_alloc.
1464 With the exception of the softmmu code in this file, this should
1465 only be used for local memory (e.g. video ram) that the device owns,
1466 and knows it isn't going to access beyond the end of the block.
1467
1468 It should not be used for general purpose DMA.
1469 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1470 */
1471void *qemu_get_ram_ptr(ram_addr_t addr)
1472{
1473 RAMBlock *block = qemu_get_ram_block(addr);
1474
0d6d3c87
PB
1475 if (xen_enabled()) {
1476 /* We need to check if the requested address is in the RAM
1477 * because we don't want to map the entire memory in QEMU.
1478 * In that case just map until the end of the page.
1479 */
1480 if (block->offset == 0) {
1481 return xen_map_cache(addr, 0, 0);
1482 } else if (block->host == NULL) {
1483 block->host =
1484 xen_map_cache(block->offset, block->length, 1);
1485 }
1486 }
1487 return block->host + (addr - block->offset);
dc828ca1
PB
1488}
1489
38bee5dc
SS
1490/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1491 * but takes a size argument */
cb85f7ab 1492static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1493{
8ab934f9
SS
1494 if (*size == 0) {
1495 return NULL;
1496 }
868bb33f 1497 if (xen_enabled()) {
e41d7c69 1498 return xen_map_cache(addr, *size, 1);
868bb33f 1499 } else {
38bee5dc
SS
1500 RAMBlock *block;
1501
a3161038 1502 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1503 if (addr - block->offset < block->length) {
1504 if (addr - block->offset + *size > block->length)
1505 *size = block->length - addr + block->offset;
1506 return block->host + (addr - block->offset);
1507 }
1508 }
1509
1510 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1511 abort();
38bee5dc
SS
1512 }
1513}
1514
7443b437
PB
1515/* Some of the softmmu routines need to translate from a host pointer
1516 (typically a TLB entry) back to a ram offset. */
1b5ec234 1517MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1518{
94a6b54f
PB
1519 RAMBlock *block;
1520 uint8_t *host = ptr;
1521
868bb33f 1522 if (xen_enabled()) {
e41d7c69 1523 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1524 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1525 }
1526
23887b79
PB
1527 block = ram_list.mru_block;
1528 if (block && block->host && host - block->host < block->length) {
1529 goto found;
1530 }
1531
a3161038 1532 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1533 /* This case append when the block is not mapped. */
1534 if (block->host == NULL) {
1535 continue;
1536 }
f471a17e 1537 if (host - block->host < block->length) {
23887b79 1538 goto found;
f471a17e 1539 }
94a6b54f 1540 }
432d268c 1541
1b5ec234 1542 return NULL;
23887b79
PB
1543
1544found:
1545 *ram_addr = block->offset + (host - block->host);
1b5ec234 1546 return block->mr;
e890261f 1547}
f471a17e 1548
a8170e5e 1549static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1550 uint64_t val, unsigned size)
9fa3e853 1551{
52159192 1552 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1553 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1554 }
0e0df1e2
AK
1555 switch (size) {
1556 case 1:
1557 stb_p(qemu_get_ram_ptr(ram_addr), val);
1558 break;
1559 case 2:
1560 stw_p(qemu_get_ram_ptr(ram_addr), val);
1561 break;
1562 case 4:
1563 stl_p(qemu_get_ram_ptr(ram_addr), val);
1564 break;
1565 default:
1566 abort();
3a7d929e 1567 }
52159192
JQ
1568 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1569 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
f23db169
FB
1570 /* we remove the notdirty callback only if the code has been
1571 flushed */
a2cd8c85 1572 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1573 CPUArchState *env = current_cpu->env_ptr;
93afeade 1574 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1575 }
9fa3e853
FB
1576}
1577
b018ddf6
PB
1578static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1579 unsigned size, bool is_write)
1580{
1581 return is_write;
1582}
1583
0e0df1e2 1584static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1585 .write = notdirty_mem_write,
b018ddf6 1586 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1587 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1588};
1589
0f459d16 1590/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1591static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1592{
93afeade
AF
1593 CPUState *cpu = current_cpu;
1594 CPUArchState *env = cpu->env_ptr;
06d55cc1 1595 target_ulong pc, cs_base;
0f459d16 1596 target_ulong vaddr;
a1d1bb31 1597 CPUWatchpoint *wp;
06d55cc1 1598 int cpu_flags;
0f459d16 1599
ff4700b0 1600 if (cpu->watchpoint_hit) {
06d55cc1
AL
1601 /* We re-entered the check after replacing the TB. Now raise
1602 * the debug interrupt so that is will trigger after the
1603 * current instruction. */
93afeade 1604 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1605 return;
1606 }
93afeade 1607 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1608 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1609 if ((vaddr == (wp->vaddr & len_mask) ||
1610 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1611 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1612 if (!cpu->watchpoint_hit) {
1613 cpu->watchpoint_hit = wp;
239c51a5 1614 tb_check_watchpoint(cpu);
6e140f28 1615 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1616 cpu->exception_index = EXCP_DEBUG;
5638d180 1617 cpu_loop_exit(cpu);
6e140f28
AL
1618 } else {
1619 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1620 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1621 cpu_resume_from_signal(cpu, NULL);
6e140f28 1622 }
06d55cc1 1623 }
6e140f28
AL
1624 } else {
1625 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1626 }
1627 }
1628}
1629
6658ffb8
PB
1630/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1631 so these check for a hit then pass through to the normal out-of-line
1632 phys routines. */
a8170e5e 1633static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1634 unsigned size)
6658ffb8 1635{
1ec9b909
AK
1636 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1637 switch (size) {
2c17449b 1638 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1639 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1640 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1641 default: abort();
1642 }
6658ffb8
PB
1643}
1644
a8170e5e 1645static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1646 uint64_t val, unsigned size)
6658ffb8 1647{
1ec9b909
AK
1648 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1649 switch (size) {
67364150 1650 case 1:
db3be60d 1651 stb_phys(&address_space_memory, addr, val);
67364150
MF
1652 break;
1653 case 2:
5ce5944d 1654 stw_phys(&address_space_memory, addr, val);
67364150
MF
1655 break;
1656 case 4:
ab1da857 1657 stl_phys(&address_space_memory, addr, val);
67364150 1658 break;
1ec9b909
AK
1659 default: abort();
1660 }
6658ffb8
PB
1661}
1662
1ec9b909
AK
1663static const MemoryRegionOps watch_mem_ops = {
1664 .read = watch_mem_read,
1665 .write = watch_mem_write,
1666 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1667};
6658ffb8 1668
a8170e5e 1669static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1670 unsigned len)
db7b5426 1671{
acc9d80b
JK
1672 subpage_t *subpage = opaque;
1673 uint8_t buf[4];
791af8c8 1674
db7b5426 1675#if defined(DEBUG_SUBPAGE)
016e9d62 1676 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1677 subpage, len, addr);
db7b5426 1678#endif
acc9d80b
JK
1679 address_space_read(subpage->as, addr + subpage->base, buf, len);
1680 switch (len) {
1681 case 1:
1682 return ldub_p(buf);
1683 case 2:
1684 return lduw_p(buf);
1685 case 4:
1686 return ldl_p(buf);
1687 default:
1688 abort();
1689 }
db7b5426
BS
1690}
1691
a8170e5e 1692static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1693 uint64_t value, unsigned len)
db7b5426 1694{
acc9d80b
JK
1695 subpage_t *subpage = opaque;
1696 uint8_t buf[4];
1697
db7b5426 1698#if defined(DEBUG_SUBPAGE)
016e9d62 1699 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1700 " value %"PRIx64"\n",
1701 __func__, subpage, len, addr, value);
db7b5426 1702#endif
acc9d80b
JK
1703 switch (len) {
1704 case 1:
1705 stb_p(buf, value);
1706 break;
1707 case 2:
1708 stw_p(buf, value);
1709 break;
1710 case 4:
1711 stl_p(buf, value);
1712 break;
1713 default:
1714 abort();
1715 }
1716 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1717}
1718
c353e4cc 1719static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1720 unsigned len, bool is_write)
c353e4cc 1721{
acc9d80b 1722 subpage_t *subpage = opaque;
c353e4cc 1723#if defined(DEBUG_SUBPAGE)
016e9d62 1724 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1725 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1726#endif
1727
acc9d80b 1728 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1729 len, is_write);
c353e4cc
PB
1730}
1731
70c68e44
AK
1732static const MemoryRegionOps subpage_ops = {
1733 .read = subpage_read,
1734 .write = subpage_write,
c353e4cc 1735 .valid.accepts = subpage_accepts,
70c68e44 1736 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1737};
1738
c227f099 1739static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1740 uint16_t section)
db7b5426
BS
1741{
1742 int idx, eidx;
1743
1744 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1745 return -1;
1746 idx = SUBPAGE_IDX(start);
1747 eidx = SUBPAGE_IDX(end);
1748#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1749 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1750 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1751#endif
db7b5426 1752 for (; idx <= eidx; idx++) {
5312bd8b 1753 mmio->sub_section[idx] = section;
db7b5426
BS
1754 }
1755
1756 return 0;
1757}
1758
acc9d80b 1759static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1760{
c227f099 1761 subpage_t *mmio;
db7b5426 1762
7267c094 1763 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1764
acc9d80b 1765 mmio->as = as;
1eec614b 1766 mmio->base = base;
2c9b15ca 1767 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1768 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1769 mmio->iomem.subpage = true;
db7b5426 1770#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1771 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1772 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1773#endif
b41aac4f 1774 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1775
1776 return mmio;
1777}
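/*
 * Illustrative sketch, not part of the committed file: how the subpage
 * machinery above is typically driven.  A subpage_t stands in for one
 * guest page split between differently-mapped ranges; here the first
 * half of the page dispatches to some MMIO section while the rest keeps
 * the PHYS_SECTION_UNASSIGNED default installed by subpage_init().  The
 * "mmio_section" index is a placeholder; real callers obtain section
 * numbers from phys_section_add().
 */
static inline void subpage_usage_sketch(AddressSpace *as, hwaddr page_base,
                                        uint16_t mmio_section)
{
    subpage_t *sp = subpage_init(as, page_base);

    /* offsets passed to subpage_register() are relative to the page start */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, mmio_section);
}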
1778
a656e22f
PC
1779static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1780 MemoryRegion *mr)
5312bd8b 1781{
a656e22f 1782 assert(as);
5312bd8b 1783 MemoryRegionSection section = {
a656e22f 1784 .address_space = as,
5312bd8b
AK
1785 .mr = mr,
1786 .offset_within_address_space = 0,
1787 .offset_within_region = 0,
052e87b0 1788 .size = int128_2_64(),
5312bd8b
AK
1789 };
1790
53cb28cb 1791 return phys_section_add(map, &section);
5312bd8b
AK
1792}
1793
77717094 1794MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1795{
77717094 1796 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1797}
1798
e9179ce1
AK
1799static void io_mem_init(void)
1800{
2c9b15ca
PB
1801 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1802 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1803 "unassigned", UINT64_MAX);
2c9b15ca 1804 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1805 "notdirty", UINT64_MAX);
2c9b15ca 1806 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1807 "watch", UINT64_MAX);
e9179ce1
AK
1808}
1809
ac1970fb 1810static void mem_begin(MemoryListener *listener)
00752703
PB
1811{
1812 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1813 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1814 uint16_t n;
1815
a656e22f 1816 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1817 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1818 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1819 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1820 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1821 assert(n == PHYS_SECTION_ROM);
a656e22f 1822 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1823 assert(n == PHYS_SECTION_WATCH);
00752703 1824
9736e55b 1825 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1826 d->as = as;
1827 as->next_dispatch = d;
1828}
1829
1830static void mem_commit(MemoryListener *listener)
ac1970fb 1831{
89ae337a 1832 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1833 AddressSpaceDispatch *cur = as->dispatch;
1834 AddressSpaceDispatch *next = as->next_dispatch;
1835
53cb28cb 1836 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1837
0475d94f 1838 as->dispatch = next;
b41aac4f 1839
53cb28cb
MA
1840 if (cur) {
1841 phys_sections_free(&cur->map);
1842 g_free(cur);
1843 }
9affd6fc
PB
1844}
1845
1d71148e 1846static void tcg_commit(MemoryListener *listener)
50c1e149 1847{
182735ef 1848 CPUState *cpu;
117712c3
AK
1849
1850 /* since each CPU stores ram addresses in its TLB cache, we must
1851 reset the modified entries */
1852 /* XXX: slow ! */
bdc44640 1853 CPU_FOREACH(cpu) {
33bde2e1
EI
1854 /* FIXME: Disentangle the circular cpu.h file dependencies so we can
1855 get the right CPU directly from the listener. */
1856 if (cpu->tcg_as_listener != listener) {
1857 continue;
1858 }
00c8cb0a 1859 tlb_flush(cpu, 1);
117712c3 1860 }
50c1e149
AK
1861}
1862
93632747
AK
1863static void core_log_global_start(MemoryListener *listener)
1864{
981fdf23 1865 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1866}
1867
1868static void core_log_global_stop(MemoryListener *listener)
1869{
981fdf23 1870 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1871}
1872
93632747 1873static MemoryListener core_memory_listener = {
93632747
AK
1874 .log_global_start = core_log_global_start,
1875 .log_global_stop = core_log_global_stop,
ac1970fb 1876 .priority = 1,
93632747
AK
1877};
1878
ac1970fb
AK
1879void address_space_init_dispatch(AddressSpace *as)
1880{
00752703 1881 as->dispatch = NULL;
89ae337a 1882 as->dispatch_listener = (MemoryListener) {
ac1970fb 1883 .begin = mem_begin,
00752703 1884 .commit = mem_commit,
ac1970fb
AK
1885 .region_add = mem_add,
1886 .region_nop = mem_add,
1887 .priority = 0,
1888 };
89ae337a 1889 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1890}
1891
83f3c251
AK
1892void address_space_destroy_dispatch(AddressSpace *as)
1893{
1894 AddressSpaceDispatch *d = as->dispatch;
1895
89ae337a 1896 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1897 g_free(d);
1898 as->dispatch = NULL;
1899}
1900
62152b8a
AK
1901static void memory_map_init(void)
1902{
7267c094 1903 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1904
57271d63 1905 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1906 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1907
7267c094 1908 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1909 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1910 65536);
7dca8043 1911 address_space_init(&address_space_io, system_io, "I/O");
93632747 1912
f6790af6 1913 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1914}
1915
1916MemoryRegion *get_system_memory(void)
1917{
1918 return system_memory;
1919}
1920
309cb471
AK
1921MemoryRegion *get_system_io(void)
1922{
1923 return system_io;
1924}
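/*
 * Illustrative sketch, not from the original file: how board or device
 * code typically consumes the regions set up above.  memory_region_init_io()
 * is used exactly as in io_mem_init(); memory_region_add_subregion() is
 * assumed to take (container, offset, subregion).  "my_dev_ops" and the
 * 0x10000000 base address are made-up placeholders, not real devices.
 */
static inline void map_example_mmio(MemoryRegion *mmio,
                                    const MemoryRegionOps *my_dev_ops,
                                    void *opaque)
{
    /* create a 4 KiB MMIO region backed by the caller's ops... */
    memory_region_init_io(mmio, NULL, my_dev_ops, opaque,
                          "example-mmio", 0x1000);
    /* ...and make it visible in the system address space */
    memory_region_add_subregion(get_system_memory(), 0x10000000, mmio);
}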
1925
e2eef170
PB
1926#endif /* !defined(CONFIG_USER_ONLY) */
1927
13eb76e0
FB
1928/* physical memory access (slow version, mainly for debug) */
1929#if defined(CONFIG_USER_ONLY)
f17ec444 1930int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1931 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1932{
1933 int l, flags;
1934 target_ulong page;
53a5960a 1935 void * p;
13eb76e0
FB
1936
1937 while (len > 0) {
1938 page = addr & TARGET_PAGE_MASK;
1939 l = (page + TARGET_PAGE_SIZE) - addr;
1940 if (l > len)
1941 l = len;
1942 flags = page_get_flags(page);
1943 if (!(flags & PAGE_VALID))
a68fe89c 1944 return -1;
13eb76e0
FB
1945 if (is_write) {
1946 if (!(flags & PAGE_WRITE))
a68fe89c 1947 return -1;
579a97f7 1948 /* XXX: this code should not depend on lock_user */
72fb7daa 1949 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1950 return -1;
72fb7daa
AJ
1951 memcpy(p, buf, l);
1952 unlock_user(p, addr, l);
13eb76e0
FB
1953 } else {
1954 if (!(flags & PAGE_READ))
a68fe89c 1955 return -1;
579a97f7 1956 /* XXX: this code should not depend on lock_user */
72fb7daa 1957 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1958 return -1;
72fb7daa 1959 memcpy(buf, p, l);
5b257578 1960 unlock_user(p, addr, 0);
13eb76e0
FB
1961 }
1962 len -= l;
1963 buf += l;
1964 addr += l;
1965 }
a68fe89c 1966 return 0;
13eb76e0 1967}
8df1cd07 1968
13eb76e0 1969#else
51d7a9eb 1970
a8170e5e
AK
1971static void invalidate_and_set_dirty(hwaddr addr,
1972 hwaddr length)
51d7a9eb 1973{
a2cd8c85 1974 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1975 /* invalidate code */
1976 tb_invalidate_phys_page_range(addr, addr + length, 0);
1977 /* set dirty bit */
52159192
JQ
1978 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1979 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 1980 }
e226939d 1981 xen_modified_memory(addr, length);
51d7a9eb
AP
1982}
1983
23326164 1984static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1985{
e1622f4b 1986 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1987
1988 /* Regions are assumed to support 1-4 byte accesses unless
1989 otherwise specified. */
23326164
RH
1990 if (access_size_max == 0) {
1991 access_size_max = 4;
1992 }
1993
1994 /* Bound the maximum access by the alignment of the address. */
1995 if (!mr->ops->impl.unaligned) {
1996 unsigned align_size_max = addr & -addr;
1997 if (align_size_max != 0 && align_size_max < access_size_max) {
1998 access_size_max = align_size_max;
1999 }
82f2563f 2000 }
23326164
RH
2001
2002 /* Don't attempt accesses larger than the maximum. */
2003 if (l > access_size_max) {
2004 l = access_size_max;
82f2563f 2005 }
098178f2
PB
2006 if (l & (l - 1)) {
2007 l = 1 << (qemu_fls(l) - 1);
2008 }
23326164
RH
2009
2010 return l;
82f2563f
PB
2011}
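/*
 * Worked example for memory_access_size(), added for clarity: for a region
 * whose ops leave valid.max_access_size at 0 (treated as 4) and which does
 * not allow unaligned accesses, a request of l = 8 bytes at addr = 0x1002
 * is clamped as follows: the default bounds it to 4, the address alignment
 * (0x1002 & -0x1002 == 2) bounds it to 2, and 2 is already a power of two,
 * so the transfer proceeds in 2-byte pieces.
 */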
2012
fd8aaa76 2013bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2014 int len, bool is_write)
13eb76e0 2015{
149f54b5 2016 hwaddr l;
13eb76e0 2017 uint8_t *ptr;
791af8c8 2018 uint64_t val;
149f54b5 2019 hwaddr addr1;
5c8a00ce 2020 MemoryRegion *mr;
fd8aaa76 2021 bool error = false;
3b46e624 2022
13eb76e0 2023 while (len > 0) {
149f54b5 2024 l = len;
5c8a00ce 2025 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2026
13eb76e0 2027 if (is_write) {
5c8a00ce
PB
2028 if (!memory_access_is_direct(mr, is_write)) {
2029 l = memory_access_size(mr, l, addr1);
4917cf44 2030 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2031 potential bugs */
23326164
RH
2032 switch (l) {
2033 case 8:
2034 /* 64 bit write access */
2035 val = ldq_p(buf);
2036 error |= io_mem_write(mr, addr1, val, 8);
2037 break;
2038 case 4:
1c213d19 2039 /* 32 bit write access */
c27004ec 2040 val = ldl_p(buf);
5c8a00ce 2041 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2042 break;
2043 case 2:
1c213d19 2044 /* 16 bit write access */
c27004ec 2045 val = lduw_p(buf);
5c8a00ce 2046 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2047 break;
2048 case 1:
1c213d19 2049 /* 8 bit write access */
c27004ec 2050 val = ldub_p(buf);
5c8a00ce 2051 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2052 break;
2053 default:
2054 abort();
13eb76e0 2055 }
2bbfa05d 2056 } else {
5c8a00ce 2057 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2058 /* RAM case */
5579c7f3 2059 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2060 memcpy(ptr, buf, l);
51d7a9eb 2061 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2062 }
2063 } else {
5c8a00ce 2064 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2065 /* I/O case */
5c8a00ce 2066 l = memory_access_size(mr, l, addr1);
23326164
RH
2067 switch (l) {
2068 case 8:
2069 /* 64 bit read access */
2070 error |= io_mem_read(mr, addr1, &val, 8);
2071 stq_p(buf, val);
2072 break;
2073 case 4:
13eb76e0 2074 /* 32 bit read access */
5c8a00ce 2075 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2076 stl_p(buf, val);
23326164
RH
2077 break;
2078 case 2:
13eb76e0 2079 /* 16 bit read access */
5c8a00ce 2080 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2081 stw_p(buf, val);
23326164
RH
2082 break;
2083 case 1:
1c213d19 2084 /* 8 bit read access */
5c8a00ce 2085 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2086 stb_p(buf, val);
23326164
RH
2087 break;
2088 default:
2089 abort();
13eb76e0
FB
2090 }
2091 } else {
2092 /* RAM case */
5c8a00ce 2093 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2094 memcpy(buf, ptr, l);
13eb76e0
FB
2095 }
2096 }
2097 len -= l;
2098 buf += l;
2099 addr += l;
2100 }
fd8aaa76
PB
2101
2102 return error;
13eb76e0 2103}
8df1cd07 2104
fd8aaa76 2105bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2106 const uint8_t *buf, int len)
2107{
fd8aaa76 2108 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2109}
2110
fd8aaa76 2111bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2112{
fd8aaa76 2113 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2114}
2115
2116
a8170e5e 2117void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2118 int len, int is_write)
2119{
fd8aaa76 2120 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2121}
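/*
 * Illustrative sketch of the accessors above: copy a small buffer into
 * guest-physical memory and read it back.  The guest-physical address and
 * the data are placeholders; address_space_rw() and its wrappers return
 * true on error, as implemented above.
 */
static inline bool physical_rw_example(hwaddr gpa)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];
    bool err;

    err = address_space_write(&address_space_memory, gpa, out, sizeof(out));
    err |= address_space_read(&address_space_memory, gpa, in, sizeof(in));
    return err;
}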
2122
582b55a9
AG
2123enum write_rom_type {
2124 WRITE_DATA,
2125 FLUSH_CACHE,
2126};
2127
2a221651 2128static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2129 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2130{
149f54b5 2131 hwaddr l;
d0ecd2aa 2132 uint8_t *ptr;
149f54b5 2133 hwaddr addr1;
5c8a00ce 2134 MemoryRegion *mr;
3b46e624 2135
d0ecd2aa 2136 while (len > 0) {
149f54b5 2137 l = len;
2a221651 2138 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2139
5c8a00ce
PB
2140 if (!(memory_region_is_ram(mr) ||
2141 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2142 /* do nothing */
2143 } else {
5c8a00ce 2144 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2145 /* ROM/RAM case */
5579c7f3 2146 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2147 switch (type) {
2148 case WRITE_DATA:
2149 memcpy(ptr, buf, l);
2150 invalidate_and_set_dirty(addr1, l);
2151 break;
2152 case FLUSH_CACHE:
2153 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2154 break;
2155 }
d0ecd2aa
FB
2156 }
2157 len -= l;
2158 buf += l;
2159 addr += l;
2160 }
2161}
2162
582b55a9 2163/* used for ROM loading : can write in RAM and ROM */
2a221651 2164void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2165 const uint8_t *buf, int len)
2166{
2a221651 2167 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2168}
2169
2170void cpu_flush_icache_range(hwaddr start, int len)
2171{
2172 /*
2173 * This function should do the same thing as an icache flush that was
2174 * triggered from within the guest. For TCG we are always cache coherent,
2175 * so there is no need to flush anything. For KVM / Xen we need to flush
2176 * the host's instruction cache at least.
2177 */
2178 if (tcg_enabled()) {
2179 return;
2180 }
2181
2a221651
EI
2182 cpu_physical_memory_write_rom_internal(&address_space_memory,
2183 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2184}
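/*
 * Illustrative sketch of the intended pairing of the two helpers above when
 * loading guest code, e.g. a firmware blob: write it through the ROM path,
 * then make sure the host instruction cache is coherent for KVM/Xen.
 * "blob" and "blob_len" are placeholders supplied by the caller.
 */
static inline void load_blob_example(AddressSpace *as, hwaddr dest,
                                     const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(as, dest, blob, blob_len);
    cpu_flush_icache_range(dest, blob_len);
}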
2185
6d16c2f8 2186typedef struct {
d3e71559 2187 MemoryRegion *mr;
6d16c2f8 2188 void *buffer;
a8170e5e
AK
2189 hwaddr addr;
2190 hwaddr len;
6d16c2f8
AL
2191} BounceBuffer;
2192
2193static BounceBuffer bounce;
2194
ba223c29
AL
2195typedef struct MapClient {
2196 void *opaque;
2197 void (*callback)(void *opaque);
72cf2d4f 2198 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2199} MapClient;
2200
72cf2d4f
BS
2201static QLIST_HEAD(map_client_list, MapClient) map_client_list
2202 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2203
2204void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2205{
7267c094 2206 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2207
2208 client->opaque = opaque;
2209 client->callback = callback;
72cf2d4f 2210 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2211 return client;
2212}
2213
8b9c99d9 2214static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2215{
2216 MapClient *client = (MapClient *)_client;
2217
72cf2d4f 2218 QLIST_REMOVE(client, link);
7267c094 2219 g_free(client);
ba223c29
AL
2220}
2221
2222static void cpu_notify_map_clients(void)
2223{
2224 MapClient *client;
2225
72cf2d4f
BS
2226 while (!QLIST_EMPTY(&map_client_list)) {
2227 client = QLIST_FIRST(&map_client_list);
ba223c29 2228 client->callback(client->opaque);
34d5e948 2229 cpu_unregister_map_client(client);
ba223c29
AL
2230 }
2231}
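/*
 * Illustrative sketch of how a DMA user is expected to use the map-client
 * hooks above.  When address_space_map() returns NULL because the single
 * bounce buffer is busy, the caller registers a callback and retries from
 * it once cpu_notify_map_clients() runs, i.e. when the bounce buffer is
 * released.  The callback and opaque pointer are caller-side placeholders.
 */
static inline void *map_or_defer(AddressSpace *as, hwaddr addr, hwaddr *plen,
                                 void (*retry_cb)(void *), void *opaque)
{
    void *p = address_space_map(as, addr, plen, false);

    if (!p) {
        /* bounce buffer busy: ask to be called back when it frees up */
        cpu_register_map_client(opaque, retry_cb);
    }
    return p;
}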
2232
51644ab7
PB
2233bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2234{
5c8a00ce 2235 MemoryRegion *mr;
51644ab7
PB
2236 hwaddr l, xlat;
2237
2238 while (len > 0) {
2239 l = len;
5c8a00ce
PB
2240 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2241 if (!memory_access_is_direct(mr, is_write)) {
2242 l = memory_access_size(mr, l, addr);
2243 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2244 return false;
2245 }
2246 }
2247
2248 len -= l;
2249 addr += l;
2250 }
2251 return true;
2252}
2253
6d16c2f8
AL
2254/* Map a physical memory region into a host virtual address.
2255 * May map a subset of the requested range, given by and returned in *plen.
2256 * May return NULL if resources needed to perform the mapping are exhausted.
2257 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2258 * Use cpu_register_map_client() to know when retrying the map operation is
2259 * likely to succeed.
6d16c2f8 2260 */
ac1970fb 2261void *address_space_map(AddressSpace *as,
a8170e5e
AK
2262 hwaddr addr,
2263 hwaddr *plen,
ac1970fb 2264 bool is_write)
6d16c2f8 2265{
a8170e5e 2266 hwaddr len = *plen;
e3127ae0
PB
2267 hwaddr done = 0;
2268 hwaddr l, xlat, base;
2269 MemoryRegion *mr, *this_mr;
2270 ram_addr_t raddr;
6d16c2f8 2271
e3127ae0
PB
2272 if (len == 0) {
2273 return NULL;
2274 }
38bee5dc 2275
e3127ae0
PB
2276 l = len;
2277 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2278 if (!memory_access_is_direct(mr, is_write)) {
2279 if (bounce.buffer) {
2280 return NULL;
6d16c2f8 2281 }
e85d9db5
KW
2282 /* Avoid unbounded allocations */
2283 l = MIN(l, TARGET_PAGE_SIZE);
2284 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2285 bounce.addr = addr;
2286 bounce.len = l;
d3e71559
PB
2287
2288 memory_region_ref(mr);
2289 bounce.mr = mr;
e3127ae0
PB
2290 if (!is_write) {
2291 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2292 }
6d16c2f8 2293
e3127ae0
PB
2294 *plen = l;
2295 return bounce.buffer;
2296 }
2297
2298 base = xlat;
2299 raddr = memory_region_get_ram_addr(mr);
2300
2301 for (;;) {
6d16c2f8
AL
2302 len -= l;
2303 addr += l;
e3127ae0
PB
2304 done += l;
2305 if (len == 0) {
2306 break;
2307 }
2308
2309 l = len;
2310 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2311 if (this_mr != mr || xlat != base + done) {
2312 break;
2313 }
6d16c2f8 2314 }
e3127ae0 2315
d3e71559 2316 memory_region_ref(mr);
e3127ae0
PB
2317 *plen = done;
2318 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2319}
2320
ac1970fb 2321/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2322 * Will also mark the memory as dirty if is_write == 1. access_len gives
2323 * the amount of memory that was actually read or written by the caller.
2324 */
a8170e5e
AK
2325void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2326 int is_write, hwaddr access_len)
6d16c2f8
AL
2327{
2328 if (buffer != bounce.buffer) {
d3e71559
PB
2329 MemoryRegion *mr;
2330 ram_addr_t addr1;
2331
2332 mr = qemu_ram_addr_from_host(buffer, &addr1);
2333 assert(mr != NULL);
6d16c2f8 2334 if (is_write) {
6d16c2f8
AL
2335 while (access_len) {
2336 unsigned l;
2337 l = TARGET_PAGE_SIZE;
2338 if (l > access_len)
2339 l = access_len;
51d7a9eb 2340 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2341 addr1 += l;
2342 access_len -= l;
2343 }
2344 }
868bb33f 2345 if (xen_enabled()) {
e41d7c69 2346 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2347 }
d3e71559 2348 memory_region_unref(mr);
6d16c2f8
AL
2349 return;
2350 }
2351 if (is_write) {
ac1970fb 2352 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2353 }
f8a83245 2354 qemu_vfree(bounce.buffer);
6d16c2f8 2355 bounce.buffer = NULL;
d3e71559 2356 memory_region_unref(bounce.mr);
ba223c29 2357 cpu_notify_map_clients();
6d16c2f8 2358}
d0ecd2aa 2359
a8170e5e
AK
2360void *cpu_physical_memory_map(hwaddr addr,
2361 hwaddr *plen,
ac1970fb
AK
2362 int is_write)
2363{
2364 return address_space_map(&address_space_memory, addr, plen, is_write);
2365}
2366
a8170e5e
AK
2367void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2368 int is_write, hwaddr access_len)
ac1970fb
AK
2369{
2370 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2371}
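/*
 * Illustrative sketch of the canonical map/unmap pattern the two wrappers
 * above support: map up to `want` bytes of guest memory for direct access
 * (or through the bounce buffer), consume them, and report the length
 * actually used back to unmap so dirty tracking stays correct.  Names are
 * placeholders; no specific device is implied.
 */
static inline void dma_read_example(hwaddr gpa, hwaddr want)
{
    hwaddr plen = want;
    void *p = cpu_physical_memory_map(gpa, &plen, 0 /* read */);

    if (!p) {
        return;     /* resources exhausted; retry via the map clients */
    }
    /* ... consume up to plen bytes at p ... */
    cpu_physical_memory_unmap(p, plen, 0 /* read */, plen);
}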
2372
8df1cd07 2373/* warning: addr must be aligned */
fdfba1a2 2374static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2375 enum device_endian endian)
8df1cd07 2376{
8df1cd07 2377 uint8_t *ptr;
791af8c8 2378 uint64_t val;
5c8a00ce 2379 MemoryRegion *mr;
149f54b5
PB
2380 hwaddr l = 4;
2381 hwaddr addr1;
8df1cd07 2382
fdfba1a2 2383 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2384 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2385 /* I/O case */
5c8a00ce 2386 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2387#if defined(TARGET_WORDS_BIGENDIAN)
2388 if (endian == DEVICE_LITTLE_ENDIAN) {
2389 val = bswap32(val);
2390 }
2391#else
2392 if (endian == DEVICE_BIG_ENDIAN) {
2393 val = bswap32(val);
2394 }
2395#endif
8df1cd07
FB
2396 } else {
2397 /* RAM case */
5c8a00ce 2398 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2399 & TARGET_PAGE_MASK)
149f54b5 2400 + addr1);
1e78bcc1
AG
2401 switch (endian) {
2402 case DEVICE_LITTLE_ENDIAN:
2403 val = ldl_le_p(ptr);
2404 break;
2405 case DEVICE_BIG_ENDIAN:
2406 val = ldl_be_p(ptr);
2407 break;
2408 default:
2409 val = ldl_p(ptr);
2410 break;
2411 }
8df1cd07
FB
2412 }
2413 return val;
2414}
2415
fdfba1a2 2416uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2417{
fdfba1a2 2418 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2419}
2420
fdfba1a2 2421uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2422{
fdfba1a2 2423 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2424}
2425
fdfba1a2 2426uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2427{
fdfba1a2 2428 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2429}
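/*
 * Illustrative sketch of picking the right variant of the helpers above: a
 * structure defined as little-endian in guest-physical memory is read with
 * the _le_ accessor regardless of host or target byte order.  The address
 * is a placeholder.
 */
static inline uint32_t read_le_reg_example(hwaddr reg_gpa)
{
    return ldl_le_phys(&address_space_memory, reg_gpa);
}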
2430
84b7b8e7 2431/* warning: addr must be aligned */
2c17449b 2432static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2433 enum device_endian endian)
84b7b8e7 2434{
84b7b8e7
FB
2435 uint8_t *ptr;
2436 uint64_t val;
5c8a00ce 2437 MemoryRegion *mr;
149f54b5
PB
2438 hwaddr l = 8;
2439 hwaddr addr1;
84b7b8e7 2440
2c17449b 2441 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2442 false);
2443 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2444 /* I/O case */
5c8a00ce 2445 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2446#if defined(TARGET_WORDS_BIGENDIAN)
2447 if (endian == DEVICE_LITTLE_ENDIAN) {
2448 val = bswap64(val);
2449 }
2450#else
2451 if (endian == DEVICE_BIG_ENDIAN) {
2452 val = bswap64(val);
2453 }
84b7b8e7
FB
2454#endif
2455 } else {
2456 /* RAM case */
5c8a00ce 2457 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2458 & TARGET_PAGE_MASK)
149f54b5 2459 + addr1);
1e78bcc1
AG
2460 switch (endian) {
2461 case DEVICE_LITTLE_ENDIAN:
2462 val = ldq_le_p(ptr);
2463 break;
2464 case DEVICE_BIG_ENDIAN:
2465 val = ldq_be_p(ptr);
2466 break;
2467 default:
2468 val = ldq_p(ptr);
2469 break;
2470 }
84b7b8e7
FB
2471 }
2472 return val;
2473}
2474
2c17449b 2475uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2476{
2c17449b 2477 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2478}
2479
2c17449b 2480uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2481{
2c17449b 2482 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2483}
2484
2c17449b 2485uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2486{
2c17449b 2487 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2488}
2489
aab33094 2490/* XXX: optimize */
2c17449b 2491uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2492{
2493 uint8_t val;
2c17449b 2494 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2495 return val;
2496}
2497
733f0b02 2498/* warning: addr must be aligned */
41701aa4 2499static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2500 enum device_endian endian)
aab33094 2501{
733f0b02
MT
2502 uint8_t *ptr;
2503 uint64_t val;
5c8a00ce 2504 MemoryRegion *mr;
149f54b5
PB
2505 hwaddr l = 2;
2506 hwaddr addr1;
733f0b02 2507
41701aa4 2508 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2509 false);
2510 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2511 /* I/O case */
5c8a00ce 2512 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2513#if defined(TARGET_WORDS_BIGENDIAN)
2514 if (endian == DEVICE_LITTLE_ENDIAN) {
2515 val = bswap16(val);
2516 }
2517#else
2518 if (endian == DEVICE_BIG_ENDIAN) {
2519 val = bswap16(val);
2520 }
2521#endif
733f0b02
MT
2522 } else {
2523 /* RAM case */
5c8a00ce 2524 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2525 & TARGET_PAGE_MASK)
149f54b5 2526 + addr1);
1e78bcc1
AG
2527 switch (endian) {
2528 case DEVICE_LITTLE_ENDIAN:
2529 val = lduw_le_p(ptr);
2530 break;
2531 case DEVICE_BIG_ENDIAN:
2532 val = lduw_be_p(ptr);
2533 break;
2534 default:
2535 val = lduw_p(ptr);
2536 break;
2537 }
733f0b02
MT
2538 }
2539 return val;
aab33094
FB
2540}
2541
41701aa4 2542uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2543{
41701aa4 2544 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2545}
2546
41701aa4 2547uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2548{
41701aa4 2549 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2550}
2551
41701aa4 2552uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2553{
41701aa4 2554 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2555}
2556
8df1cd07
FB
2557/* warning: addr must be aligned. The ram page is not marked as dirty
2558 and the code inside is not invalidated. It is useful if the dirty
2559 bits are used to track modified PTEs */
2198a121 2560void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2561{
8df1cd07 2562 uint8_t *ptr;
5c8a00ce 2563 MemoryRegion *mr;
149f54b5
PB
2564 hwaddr l = 4;
2565 hwaddr addr1;
8df1cd07 2566
2198a121 2567 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2568 true);
2569 if (l < 4 || !memory_access_is_direct(mr, true)) {
2570 io_mem_write(mr, addr1, val, 4);
8df1cd07 2571 } else {
5c8a00ce 2572 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2573 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2574 stl_p(ptr, val);
74576198
AL
2575
2576 if (unlikely(in_migration)) {
a2cd8c85 2577 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2578 /* invalidate code */
2579 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2580 /* set dirty bit */
52159192
JQ
2581 cpu_physical_memory_set_dirty_flag(addr1,
2582 DIRTY_MEMORY_MIGRATION);
2583 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2584 }
2585 }
8df1cd07
FB
2586 }
2587}
2588
2589/* warning: addr must be aligned */
ab1da857
EI
2590static inline void stl_phys_internal(AddressSpace *as,
2591 hwaddr addr, uint32_t val,
1e78bcc1 2592 enum device_endian endian)
8df1cd07 2593{
8df1cd07 2594 uint8_t *ptr;
5c8a00ce 2595 MemoryRegion *mr;
149f54b5
PB
2596 hwaddr l = 4;
2597 hwaddr addr1;
8df1cd07 2598
ab1da857 2599 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2600 true);
2601 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2602#if defined(TARGET_WORDS_BIGENDIAN)
2603 if (endian == DEVICE_LITTLE_ENDIAN) {
2604 val = bswap32(val);
2605 }
2606#else
2607 if (endian == DEVICE_BIG_ENDIAN) {
2608 val = bswap32(val);
2609 }
2610#endif
5c8a00ce 2611 io_mem_write(mr, addr1, val, 4);
8df1cd07 2612 } else {
8df1cd07 2613 /* RAM case */
5c8a00ce 2614 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2615 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2616 switch (endian) {
2617 case DEVICE_LITTLE_ENDIAN:
2618 stl_le_p(ptr, val);
2619 break;
2620 case DEVICE_BIG_ENDIAN:
2621 stl_be_p(ptr, val);
2622 break;
2623 default:
2624 stl_p(ptr, val);
2625 break;
2626 }
51d7a9eb 2627 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2628 }
2629}
2630
ab1da857 2631void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2632{
ab1da857 2633 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2634}
2635
ab1da857 2636void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2637{
ab1da857 2638 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2639}
2640
ab1da857 2641void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2642{
ab1da857 2643 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2644}
2645
aab33094 2646/* XXX: optimize */
db3be60d 2647void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2648{
2649 uint8_t v = val;
db3be60d 2650 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2651}
2652
733f0b02 2653/* warning: addr must be aligned */
5ce5944d
EI
2654static inline void stw_phys_internal(AddressSpace *as,
2655 hwaddr addr, uint32_t val,
1e78bcc1 2656 enum device_endian endian)
aab33094 2657{
733f0b02 2658 uint8_t *ptr;
5c8a00ce 2659 MemoryRegion *mr;
149f54b5
PB
2660 hwaddr l = 2;
2661 hwaddr addr1;
733f0b02 2662
5ce5944d 2663 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2664 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2665#if defined(TARGET_WORDS_BIGENDIAN)
2666 if (endian == DEVICE_LITTLE_ENDIAN) {
2667 val = bswap16(val);
2668 }
2669#else
2670 if (endian == DEVICE_BIG_ENDIAN) {
2671 val = bswap16(val);
2672 }
2673#endif
5c8a00ce 2674 io_mem_write(mr, addr1, val, 2);
733f0b02 2675 } else {
733f0b02 2676 /* RAM case */
5c8a00ce 2677 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2678 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2679 switch (endian) {
2680 case DEVICE_LITTLE_ENDIAN:
2681 stw_le_p(ptr, val);
2682 break;
2683 case DEVICE_BIG_ENDIAN:
2684 stw_be_p(ptr, val);
2685 break;
2686 default:
2687 stw_p(ptr, val);
2688 break;
2689 }
51d7a9eb 2690 invalidate_and_set_dirty(addr1, 2);
733f0b02 2691 }
aab33094
FB
2692}
2693
5ce5944d 2694void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2695{
5ce5944d 2696 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2697}
2698
5ce5944d 2699void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2700{
5ce5944d 2701 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2702}
2703
5ce5944d 2704void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2705{
5ce5944d 2706 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2707}
2708
aab33094 2709/* XXX: optimize */
f606604f 2710void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2711{
2712 val = tswap64(val);
f606604f 2713 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2714}
2715
f606604f 2716void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2717{
2718 val = cpu_to_le64(val);
f606604f 2719 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2720}
2721
f606604f 2722void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2723{
2724 val = cpu_to_be64(val);
f606604f 2725 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2726}
2727
5e2972fd 2728/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2729int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2730 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2731{
2732 int l;
a8170e5e 2733 hwaddr phys_addr;
9b3c35e0 2734 target_ulong page;
13eb76e0
FB
2735
2736 while (len > 0) {
2737 page = addr & TARGET_PAGE_MASK;
f17ec444 2738 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2739 /* if no physical page mapped, return an error */
2740 if (phys_addr == -1)
2741 return -1;
2742 l = (page + TARGET_PAGE_SIZE) - addr;
2743 if (l > len)
2744 l = len;
5e2972fd 2745 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2746 if (is_write) {
2747 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2748 } else {
2749 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2750 }
13eb76e0
FB
2751 len -= l;
2752 buf += l;
2753 addr += l;
2754 }
2755 return 0;
2756}
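/*
 * Illustrative sketch of how a debugger front end (e.g. the gdbstub) uses
 * cpu_memory_rw_debug(): guest-virtual addresses are translated page by
 * page, reads go through address_space_rw() and writes through the ROM
 * path, exactly as implemented above.  Buffer and length come from the
 * caller.
 */
static inline int debug_peek_example(CPUState *cpu, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0 /* read */);
}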
a68fe89c 2757#endif
13eb76e0 2758
8e4a424b
BS
2759#if !defined(CONFIG_USER_ONLY)
2760
2761/*
2762 * A helper function for the _utterly broken_ virtio device model to find out if
2763 * it's running on a big endian machine. Don't do this at home kids!
2764 */
2765bool virtio_is_big_endian(void);
2766bool virtio_is_big_endian(void)
2767{
2768#if defined(TARGET_WORDS_BIGENDIAN)
2769 return true;
2770#else
2771 return false;
2772#endif
2773}
2774
2775#endif
2776
76f35538 2777#ifndef CONFIG_USER_ONLY
a8170e5e 2778bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2779{
5c8a00ce 2780 MemoryRegion *mr;
149f54b5 2781 hwaddr l = 1;
76f35538 2782
5c8a00ce
PB
2783 mr = address_space_translate(&address_space_memory,
2784 phys_addr, &phys_addr, &l, false);
76f35538 2785
5c8a00ce
PB
2786 return !(memory_region_is_ram(mr) ||
2787 memory_region_is_romd(mr));
76f35538 2788}
bd2fa51f
MH
2789
2790void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2791{
2792 RAMBlock *block;
2793
2794 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2795 func(block->host, block->offset, block->length, opaque);
2796 }
2797}
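/*
 * Illustrative sketch of a callback for the iterator above.  Judging from
 * the call site, the callback receives the host pointer, the block's
 * ram_addr_t offset and length, and the opaque pointer; the exact
 * RAMBlockIterFunc typedef lives in the headers, so treat this signature
 * as an assumption.  Here it simply accumulates the total RAM size.
 */
static void sum_ram_example(void *host_addr, ram_addr_t offset,
                            ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    (void)host_addr;
    (void)offset;
    *total += length;
}

/* usage:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(sum_ram_example, &total);
 */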
ec3f8c99 2798#endif