/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED (1 << 1)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many levels to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

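/* The physical memory map below is a radix tree of Nodes: each level resolves
 * P_L2_BITS of the page frame number, and "skip" lets an entry jump over
 * levels whose nodes would contain only a single child.  Leaf entries hold an
 * index into the PhysPageMap's sections array.
 */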
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

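/* Translate (as, addr) into the target MemoryRegion, walking any IOMMUs on
 * the way.  Each iteration asks the region's iommu_ops for a translation,
 * clamps the accessible length to the IOMMU page, and continues in the
 * target address space until a non-IOMMU region is reached.
 */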
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

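/* Register a section that does not start or end on a target page boundary.
 * Such a section is routed through a subpage_t container, so that each
 * TARGET_PAGE_SIZE-sized page of the address space still resolves to a
 * single entry in the dispatch tree.
 */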
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#endif

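/* Pick a ram_addr_t offset for a new RAM block: scan the existing blocks and
 * return the start of the smallest gap that is large enough, which keeps the
 * ram_addr_t offset space reasonably compact.
 */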
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block = find_ram_block(addr);
    RAMBlock *block;

    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block = find_ram_block(addr);

    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

static ram_addr_t ram_block_add(RAMBlock *new_block)
{
    RAMBlock *block;
    ram_addr_t old_ram_size, new_ram_size;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
        } else {
            new_block->host = phys_mem_alloc(new_block->length);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, new_block->length);
        }
    }

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    if (new_ram_size > old_ram_size) {
        int i;
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            ram_list.dirty_memory[i] =
                bitmap_zero_extend(ram_list.dirty_memory[i],
                                   old_ram_size, new_ram_size);
        }
    }
    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);

    qemu_ram_setup_dump(new_block->host, new_block->length);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, new_block->length);
    }

    return new_block->offset;
}

#ifdef __linux__
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp)
{
    RAMBlock *new_block;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return -1;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return -1;
    }

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return -1;
    }

    return ram_block_add(new_block);
}
#endif

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->length = size;
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    return ram_block_add(new_block);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

AW
1353void qemu_ram_free_from_ptr(ram_addr_t addr)
1354{
1355 RAMBlock *block;
1356
b2a8658e
UD
1357 /* This assumes the iothread lock is taken here too. */
1358 qemu_mutex_lock_ramlist();
a3161038 1359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1360 if (addr == block->offset) {
a3161038 1361 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1362 ram_list.mru_block = NULL;
f798b07f 1363 ram_list.version++;
7267c094 1364 g_free(block);
b2a8658e 1365 break;
1f2e98b6
AW
1366 }
1367 }
b2a8658e 1368 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1369}
1370
c227f099 1371void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1372{
04b16653
AW
1373 RAMBlock *block;
1374
b2a8658e
UD
1375 /* This assumes the iothread lock is taken here too. */
1376 qemu_mutex_lock_ramlist();
a3161038 1377 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1378 if (addr == block->offset) {
a3161038 1379 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1380 ram_list.mru_block = NULL;
f798b07f 1381 ram_list.version++;
7bd4f430 1382 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1383 ;
dfeaf2ab
MA
1384 } else if (xen_enabled()) {
1385 xen_invalidate_map_cache_entry(block->host);
089f3f76 1386#ifndef _WIN32
3435f395
MA
1387 } else if (block->fd >= 0) {
1388 munmap(block->host, block->length);
1389 close(block->fd);
089f3f76 1390#endif
04b16653 1391 } else {
dfeaf2ab 1392 qemu_anon_ram_free(block->host, block->length);
04b16653 1393 }
7267c094 1394 g_free(block);
b2a8658e 1395 break;
04b16653
AW
1396 }
1397 }
b2a8658e 1398 qemu_mutex_unlock_ramlist();
04b16653 1399
e9a1ab19
FB
1400}
1401
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    return block->fd;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}

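/* Write handler for pages whose TLB entries carry the NOTDIRTY flag: the
 * write first invalidates any translated code on the page, performs the
 * store, then marks the migration and VGA dirty bitmaps.  Once the page is
 * no longer clean, the TLB entry is marked dirty again so later writes go
 * straight to memory instead of trapping here.
 */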
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *cpu = current_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!cpu->watchpoint_hit) {
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(&address_space_memory, addr);
    case 2: return lduw_phys(&address_space_memory, addr);
    case 4: return ldl_phys(&address_space_memory, addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(&address_space_memory, addr, val);
        break;
    case 2:
        stw_phys(&address_space_memory, addr, val);
        break;
    case 4:
        stl_phys(&address_space_memory, addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

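/* Subpage access routines.  A subpage_t covers a single target page whose
 * sub-ranges belong to different MemoryRegionSections; accesses are bounced
 * to the owning address space at subpage->base + addr, so the per-range
 * section mapping registered with subpage_register() is honoured.
 */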
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

77717094 1791MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1792{
77717094 1793 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1794}
1795
e9179ce1
AK
1796static void io_mem_init(void)
1797{
2c9b15ca
PB
1798 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1799 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1800 "unassigned", UINT64_MAX);
2c9b15ca 1801 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1802 "notdirty", UINT64_MAX);
2c9b15ca 1803 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1804 "watch", UINT64_MAX);
e9179ce1
AK
1805}
1806
ac1970fb 1807static void mem_begin(MemoryListener *listener)
00752703
PB
1808{
1809 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1810 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1811 uint16_t n;
1812
a656e22f 1813 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1814 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1815 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1816 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1817 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1818 assert(n == PHYS_SECTION_ROM);
a656e22f 1819 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1820 assert(n == PHYS_SECTION_WATCH);
00752703 1821
9736e55b 1822 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1823 d->as = as;
1824 as->next_dispatch = d;
1825}
1826
1827static void mem_commit(MemoryListener *listener)
ac1970fb 1828{
89ae337a 1829 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1830 AddressSpaceDispatch *cur = as->dispatch;
1831 AddressSpaceDispatch *next = as->next_dispatch;
1832
53cb28cb 1833 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1834
0475d94f 1835 as->dispatch = next;
b41aac4f 1836
53cb28cb
MA
1837 if (cur) {
1838 phys_sections_free(&cur->map);
1839 g_free(cur);
1840 }
9affd6fc
PB
1841}
1842
1d71148e 1843static void tcg_commit(MemoryListener *listener)
50c1e149 1844{
182735ef 1845 CPUState *cpu;
117712c3
AK
1846
1847 /* since each CPU stores ram addresses in its TLB cache, we must
1848 reset the modified entries */
1849 /* XXX: slow ! */
bdc44640 1850 CPU_FOREACH(cpu) {
33bde2e1
EI
 1851        /* FIXME: Disentangle the cpu.h circular file dependencies so we can
1852 directly get the right CPU from listener. */
1853 if (cpu->tcg_as_listener != listener) {
1854 continue;
1855 }
00c8cb0a 1856 tlb_flush(cpu, 1);
117712c3 1857 }
50c1e149
AK
1858}
1859
93632747
AK
1860static void core_log_global_start(MemoryListener *listener)
1861{
981fdf23 1862 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1863}
1864
1865static void core_log_global_stop(MemoryListener *listener)
1866{
981fdf23 1867 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1868}
1869
93632747 1870static MemoryListener core_memory_listener = {
93632747
AK
1871 .log_global_start = core_log_global_start,
1872 .log_global_stop = core_log_global_stop,
ac1970fb 1873 .priority = 1,
93632747
AK
1874};
1875
ac1970fb
AK
1876void address_space_init_dispatch(AddressSpace *as)
1877{
00752703 1878 as->dispatch = NULL;
89ae337a 1879 as->dispatch_listener = (MemoryListener) {
ac1970fb 1880 .begin = mem_begin,
00752703 1881 .commit = mem_commit,
ac1970fb
AK
1882 .region_add = mem_add,
1883 .region_nop = mem_add,
1884 .priority = 0,
1885 };
89ae337a 1886 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1887}
1888
83f3c251
AK
1889void address_space_destroy_dispatch(AddressSpace *as)
1890{
1891 AddressSpaceDispatch *d = as->dispatch;
1892
89ae337a 1893 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1894 g_free(d);
1895 as->dispatch = NULL;
1896}
1897
62152b8a
AK
1898static void memory_map_init(void)
1899{
7267c094 1900 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1901
57271d63 1902 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1903 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1904
7267c094 1905 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1906 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1907 65536);
7dca8043 1908 address_space_init(&address_space_io, system_io, "I/O");
93632747 1909
f6790af6 1910 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1911}
1912
1913MemoryRegion *get_system_memory(void)
1914{
1915 return system_memory;
1916}
1917
309cb471
AK
1918MemoryRegion *get_system_io(void)
1919{
1920 return system_io;
1921}
1922
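/* Illustrative sketch (not part of exec.c): board code is the usual consumer
 * of get_system_memory()/get_system_io() - for instance carving out a RAM
 * region at guest-physical address 0.  The name "example.ram", the size
 * argument and the base address are hypothetical; the memory API calls are
 * assumed to match this tree (no Error ** parameter yet).
 */
static void example_add_board_ram(uint64_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "example.ram", ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}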
e2eef170
PB
1923#endif /* !defined(CONFIG_USER_ONLY) */
1924
13eb76e0
FB
1925/* physical memory access (slow version, mainly for debug) */
1926#if defined(CONFIG_USER_ONLY)
f17ec444 1927int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1928 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1929{
1930 int l, flags;
1931 target_ulong page;
53a5960a 1932 void * p;
13eb76e0
FB
1933
1934 while (len > 0) {
1935 page = addr & TARGET_PAGE_MASK;
1936 l = (page + TARGET_PAGE_SIZE) - addr;
1937 if (l > len)
1938 l = len;
1939 flags = page_get_flags(page);
1940 if (!(flags & PAGE_VALID))
a68fe89c 1941 return -1;
13eb76e0
FB
1942 if (is_write) {
1943 if (!(flags & PAGE_WRITE))
a68fe89c 1944 return -1;
579a97f7 1945 /* XXX: this code should not depend on lock_user */
72fb7daa 1946 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1947 return -1;
72fb7daa
AJ
1948 memcpy(p, buf, l);
1949 unlock_user(p, addr, l);
13eb76e0
FB
1950 } else {
1951 if (!(flags & PAGE_READ))
a68fe89c 1952 return -1;
579a97f7 1953 /* XXX: this code should not depend on lock_user */
72fb7daa 1954 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1955 return -1;
72fb7daa 1956 memcpy(buf, p, l);
5b257578 1957 unlock_user(p, addr, 0);
13eb76e0
FB
1958 }
1959 len -= l;
1960 buf += l;
1961 addr += l;
1962 }
a68fe89c 1963 return 0;
13eb76e0 1964}
8df1cd07 1965
13eb76e0 1966#else
51d7a9eb 1967
a8170e5e
AK
1968static void invalidate_and_set_dirty(hwaddr addr,
1969 hwaddr length)
51d7a9eb 1970{
a2cd8c85 1971 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1972 /* invalidate code */
1973 tb_invalidate_phys_page_range(addr, addr + length, 0);
1974 /* set dirty bit */
52159192
JQ
1975 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1976 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 1977 }
e226939d 1978 xen_modified_memory(addr, length);
51d7a9eb
AP
1979}
1980
23326164 1981static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1982{
e1622f4b 1983 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1984
1985 /* Regions are assumed to support 1-4 byte accesses unless
1986 otherwise specified. */
23326164
RH
1987 if (access_size_max == 0) {
1988 access_size_max = 4;
1989 }
1990
1991 /* Bound the maximum access by the alignment of the address. */
1992 if (!mr->ops->impl.unaligned) {
1993 unsigned align_size_max = addr & -addr;
1994 if (align_size_max != 0 && align_size_max < access_size_max) {
1995 access_size_max = align_size_max;
1996 }
82f2563f 1997 }
23326164
RH
1998
1999 /* Don't attempt accesses larger than the maximum. */
2000 if (l > access_size_max) {
2001 l = access_size_max;
82f2563f 2002 }
098178f2
PB
2003 if (l & (l - 1)) {
2004 l = 1 << (qemu_fls(l) - 1);
2005 }
23326164
RH
2006
2007 return l;
82f2563f
PB
2008}
2009
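/* Illustrative sketch (not part of exec.c): a standalone model of the
 * clamping done by memory_access_size() above, useful for seeing the three
 * steps in isolation.  __builtin_clz stands in for qemu_fls(); the sample
 * values in the trailing comment are assumptions.
 */
static unsigned example_clamp_access_size(unsigned l, uint64_t addr,
                                          unsigned max_access_size,
                                          bool unaligned_ok)
{
    if (max_access_size == 0) {
        max_access_size = 4;                /* default: 1-4 byte accesses */
    }
    if (!unaligned_ok) {
        unsigned align = addr & -addr;      /* largest power of two dividing addr */
        if (align != 0 && align < max_access_size) {
            max_access_size = align;
        }
    }
    if (l > max_access_size) {
        l = max_access_size;
    }
    if (l & (l - 1)) {                      /* round down to a power of two */
        l = 1 << (31 - __builtin_clz(l));
    }
    return l;    /* e.g. l=8, addr=0x1002, max=4, aligned-only  ->  2 */
}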
fd8aaa76 2010bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2011 int len, bool is_write)
13eb76e0 2012{
149f54b5 2013 hwaddr l;
13eb76e0 2014 uint8_t *ptr;
791af8c8 2015 uint64_t val;
149f54b5 2016 hwaddr addr1;
5c8a00ce 2017 MemoryRegion *mr;
fd8aaa76 2018 bool error = false;
3b46e624 2019
13eb76e0 2020 while (len > 0) {
149f54b5 2021 l = len;
5c8a00ce 2022 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2023
13eb76e0 2024 if (is_write) {
5c8a00ce
PB
2025 if (!memory_access_is_direct(mr, is_write)) {
2026 l = memory_access_size(mr, l, addr1);
4917cf44 2027 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2028 potential bugs */
23326164
RH
2029 switch (l) {
2030 case 8:
2031 /* 64 bit write access */
2032 val = ldq_p(buf);
2033 error |= io_mem_write(mr, addr1, val, 8);
2034 break;
2035 case 4:
1c213d19 2036 /* 32 bit write access */
c27004ec 2037 val = ldl_p(buf);
5c8a00ce 2038 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2039 break;
2040 case 2:
1c213d19 2041 /* 16 bit write access */
c27004ec 2042 val = lduw_p(buf);
5c8a00ce 2043 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2044 break;
2045 case 1:
1c213d19 2046 /* 8 bit write access */
c27004ec 2047 val = ldub_p(buf);
5c8a00ce 2048 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2049 break;
2050 default:
2051 abort();
13eb76e0 2052 }
2bbfa05d 2053 } else {
5c8a00ce 2054 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2055 /* RAM case */
5579c7f3 2056 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2057 memcpy(ptr, buf, l);
51d7a9eb 2058 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2059 }
2060 } else {
5c8a00ce 2061 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2062 /* I/O case */
5c8a00ce 2063 l = memory_access_size(mr, l, addr1);
23326164
RH
2064 switch (l) {
2065 case 8:
2066 /* 64 bit read access */
2067 error |= io_mem_read(mr, addr1, &val, 8);
2068 stq_p(buf, val);
2069 break;
2070 case 4:
13eb76e0 2071 /* 32 bit read access */
5c8a00ce 2072 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2073 stl_p(buf, val);
23326164
RH
2074 break;
2075 case 2:
13eb76e0 2076 /* 16 bit read access */
5c8a00ce 2077 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2078 stw_p(buf, val);
23326164
RH
2079 break;
2080 case 1:
1c213d19 2081 /* 8 bit read access */
5c8a00ce 2082 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2083 stb_p(buf, val);
23326164
RH
2084 break;
2085 default:
2086 abort();
13eb76e0
FB
2087 }
2088 } else {
2089 /* RAM case */
5c8a00ce 2090 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2091 memcpy(buf, ptr, l);
13eb76e0
FB
2092 }
2093 }
2094 len -= l;
2095 buf += l;
2096 addr += l;
2097 }
fd8aaa76
PB
2098
2099 return error;
13eb76e0 2100}
8df1cd07 2101
fd8aaa76 2102bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2103 const uint8_t *buf, int len)
2104{
fd8aaa76 2105 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2106}
2107
fd8aaa76 2108bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2109{
fd8aaa76 2110 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2111}
2112
2113
a8170e5e 2114void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2115 int len, int is_write)
2116{
fd8aaa76 2117 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2118}
2119
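/* Illustrative sketch (not part of exec.c): a device model pulling a guest
 * descriptor into host memory with the accessors above.  ExampleDesc, its
 * layout and desc_gpa are hypothetical; per-field endianness handling is
 * omitted for brevity.
 */
typedef struct ExampleDesc {
    uint64_t buf_addr;
    uint32_t len;
    uint32_t flags;
} ExampleDesc;

static bool example_read_desc(AddressSpace *as, hwaddr desc_gpa,
                              ExampleDesc *desc)
{
    /* address_space_read() returns true if any part of the access failed */
    return !address_space_read(as, desc_gpa, (uint8_t *)desc, sizeof(*desc));
}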
582b55a9
AG
2120enum write_rom_type {
2121 WRITE_DATA,
2122 FLUSH_CACHE,
2123};
2124
2a221651 2125static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2126 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2127{
149f54b5 2128 hwaddr l;
d0ecd2aa 2129 uint8_t *ptr;
149f54b5 2130 hwaddr addr1;
5c8a00ce 2131 MemoryRegion *mr;
3b46e624 2132
d0ecd2aa 2133 while (len > 0) {
149f54b5 2134 l = len;
2a221651 2135 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2136
5c8a00ce
PB
2137 if (!(memory_region_is_ram(mr) ||
2138 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2139 /* do nothing */
2140 } else {
5c8a00ce 2141 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2142 /* ROM/RAM case */
5579c7f3 2143 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2144 switch (type) {
2145 case WRITE_DATA:
2146 memcpy(ptr, buf, l);
2147 invalidate_and_set_dirty(addr1, l);
2148 break;
2149 case FLUSH_CACHE:
2150 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2151 break;
2152 }
d0ecd2aa
FB
2153 }
2154 len -= l;
2155 buf += l;
2156 addr += l;
2157 }
2158}
2159
582b55a9 2160/* used for ROM loading: can write in RAM and ROM */
2a221651 2161void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2162 const uint8_t *buf, int len)
2163{
2a221651 2164 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2165}
2166
2167void cpu_flush_icache_range(hwaddr start, int len)
2168{
2169 /*
2170 * This function should do the same thing as an icache flush that was
2171 * triggered from within the guest. For TCG we are always cache coherent,
2172 * so there is no need to flush anything. For KVM / Xen we need to flush
2173 * the host's instruction cache at least.
2174 */
2175 if (tcg_enabled()) {
2176 return;
2177 }
2178
2a221651
EI
2179 cpu_physical_memory_write_rom_internal(&address_space_memory,
2180 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2181}
2182
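/* Illustrative sketch (not part of exec.c): a ROM loader typically pairs the
 * two helpers above - write the image, then make the host instruction cache
 * coherent for the KVM/Xen case.  blob, blob_size and base are hypothetical.
 */
static void example_load_rom_blob(AddressSpace *as, hwaddr base,
                                  const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(as, base, blob, blob_size);
    cpu_flush_icache_range(base, blob_size);
}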
6d16c2f8 2183typedef struct {
d3e71559 2184 MemoryRegion *mr;
6d16c2f8 2185 void *buffer;
a8170e5e
AK
2186 hwaddr addr;
2187 hwaddr len;
6d16c2f8
AL
2188} BounceBuffer;
2189
2190static BounceBuffer bounce;
2191
ba223c29
AL
2192typedef struct MapClient {
2193 void *opaque;
2194 void (*callback)(void *opaque);
72cf2d4f 2195 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2196} MapClient;
2197
72cf2d4f
BS
2198static QLIST_HEAD(map_client_list, MapClient) map_client_list
2199 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2200
2201void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2202{
7267c094 2203 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2204
2205 client->opaque = opaque;
2206 client->callback = callback;
72cf2d4f 2207 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2208 return client;
2209}
2210
8b9c99d9 2211static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2212{
2213 MapClient *client = (MapClient *)_client;
2214
72cf2d4f 2215 QLIST_REMOVE(client, link);
7267c094 2216 g_free(client);
ba223c29
AL
2217}
2218
2219static void cpu_notify_map_clients(void)
2220{
2221 MapClient *client;
2222
72cf2d4f
BS
2223 while (!QLIST_EMPTY(&map_client_list)) {
2224 client = QLIST_FIRST(&map_client_list);
ba223c29 2225 client->callback(client->opaque);
34d5e948 2226 cpu_unregister_map_client(client);
ba223c29
AL
2227 }
2228}
2229
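/* Illustrative sketch (not part of exec.c): a DMA helper that retries once
 * the single bounce buffer is released.  ExampleDMAState and
 * example_dma_run() are hypothetical; note that cpu_notify_map_clients()
 * above unregisters the client after invoking it, so re-registering on every
 * failed attempt is the intended pattern.
 */
typedef struct ExampleDMAState {
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
} ExampleDMAState;

static void example_dma_run(void *opaque)
{
    ExampleDMAState *s = opaque;
    hwaddr plen = s->len;
    void *p = address_space_map(s->as, s->addr, &plen, true);

    if (!p) {
        /* Bounce buffer busy: try again when some mapping is unmapped. */
        cpu_register_map_client(s, example_dma_run);
        return;
    }
    /* ... produce up to plen bytes into p ... */
    address_space_unmap(s->as, p, plen, 1, plen);
}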
51644ab7
PB
2230bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2231{
5c8a00ce 2232 MemoryRegion *mr;
51644ab7
PB
2233 hwaddr l, xlat;
2234
2235 while (len > 0) {
2236 l = len;
5c8a00ce
PB
2237 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2238 if (!memory_access_is_direct(mr, is_write)) {
2239 l = memory_access_size(mr, l, addr);
2240 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2241 return false;
2242 }
2243 }
2244
2245 len -= l;
2246 addr += l;
2247 }
2248 return true;
2249}
2250
6d16c2f8
AL
2251/* Map a physical memory region into a host virtual address.
2252 * May map a subset of the requested range, given by and returned in *plen.
2253 * May return NULL if resources needed to perform the mapping are exhausted.
2254 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2255 * Use cpu_register_map_client() to know when retrying the map operation is
2256 * likely to succeed.
6d16c2f8 2257 */
ac1970fb 2258void *address_space_map(AddressSpace *as,
a8170e5e
AK
2259 hwaddr addr,
2260 hwaddr *plen,
ac1970fb 2261 bool is_write)
6d16c2f8 2262{
a8170e5e 2263 hwaddr len = *plen;
e3127ae0
PB
2264 hwaddr done = 0;
2265 hwaddr l, xlat, base;
2266 MemoryRegion *mr, *this_mr;
2267 ram_addr_t raddr;
6d16c2f8 2268
e3127ae0
PB
2269 if (len == 0) {
2270 return NULL;
2271 }
38bee5dc 2272
e3127ae0
PB
2273 l = len;
2274 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2275 if (!memory_access_is_direct(mr, is_write)) {
2276 if (bounce.buffer) {
2277 return NULL;
6d16c2f8 2278 }
e85d9db5
KW
2279 /* Avoid unbounded allocations */
2280 l = MIN(l, TARGET_PAGE_SIZE);
2281 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2282 bounce.addr = addr;
2283 bounce.len = l;
d3e71559
PB
2284
2285 memory_region_ref(mr);
2286 bounce.mr = mr;
e3127ae0
PB
2287 if (!is_write) {
2288 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2289 }
6d16c2f8 2290
e3127ae0
PB
2291 *plen = l;
2292 return bounce.buffer;
2293 }
2294
2295 base = xlat;
2296 raddr = memory_region_get_ram_addr(mr);
2297
2298 for (;;) {
6d16c2f8
AL
2299 len -= l;
2300 addr += l;
e3127ae0
PB
2301 done += l;
2302 if (len == 0) {
2303 break;
2304 }
2305
2306 l = len;
2307 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2308 if (this_mr != mr || xlat != base + done) {
2309 break;
2310 }
6d16c2f8 2311 }
e3127ae0 2312
d3e71559 2313 memory_region_ref(mr);
e3127ae0
PB
2314 *plen = done;
2315 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2316}
2317
ac1970fb 2318/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2319 * Will also mark the memory as dirty if is_write == 1. access_len gives
2320 * the amount of memory that was actually read or written by the caller.
2321 */
a8170e5e
AK
2322void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2323 int is_write, hwaddr access_len)
6d16c2f8
AL
2324{
2325 if (buffer != bounce.buffer) {
d3e71559
PB
2326 MemoryRegion *mr;
2327 ram_addr_t addr1;
2328
2329 mr = qemu_ram_addr_from_host(buffer, &addr1);
2330 assert(mr != NULL);
6d16c2f8 2331 if (is_write) {
6d16c2f8
AL
2332 while (access_len) {
2333 unsigned l;
2334 l = TARGET_PAGE_SIZE;
2335 if (l > access_len)
2336 l = access_len;
51d7a9eb 2337 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2338 addr1 += l;
2339 access_len -= l;
2340 }
2341 }
868bb33f 2342 if (xen_enabled()) {
e41d7c69 2343 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2344 }
d3e71559 2345 memory_region_unref(mr);
6d16c2f8
AL
2346 return;
2347 }
2348 if (is_write) {
ac1970fb 2349 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2350 }
f8a83245 2351 qemu_vfree(bounce.buffer);
6d16c2f8 2352 bounce.buffer = NULL;
d3e71559 2353 memory_region_unref(bounce.mr);
ba223c29 2354 cpu_notify_map_clients();
6d16c2f8 2355}
d0ecd2aa 2356
a8170e5e
AK
2357void *cpu_physical_memory_map(hwaddr addr,
2358 hwaddr *plen,
ac1970fb
AK
2359 int is_write)
2360{
2361 return address_space_map(&address_space_memory, addr, plen, is_write);
2362}
2363
a8170e5e
AK
2364void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2365 int is_write, hwaddr access_len)
ac1970fb
AK
2366{
2367 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2368}
2369
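/* Illustrative sketch (not part of exec.c): zero-filling a guest-physical
 * range through the map/unmap pair above.  The helper falls back to
 * cpu_physical_memory_rw() in small chunks when no direct mapping can be
 * obtained; the function name and chunk size are hypothetical, and the usual
 * QEMU headers (for MIN and memset) are assumed.
 */
static void example_zero_guest_range(hwaddr addr, hwaddr len)
{
    while (len) {
        hwaddr plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 1);

        if (!p) {
            /* Mapping resources exhausted: use the slow copying path. */
            uint8_t zeroes[64] = { 0 };
            hwaddr n = MIN(len, sizeof(zeroes));

            cpu_physical_memory_rw(addr, zeroes, n, 1);
            addr += n;
            len -= n;
            continue;
        }
        memset(p, 0, plen);
        /* access_len == plen: the whole mapping was written and is dirty. */
        cpu_physical_memory_unmap(p, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}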
8df1cd07 2370/* warning: addr must be aligned */
fdfba1a2 2371static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2372 enum device_endian endian)
8df1cd07 2373{
8df1cd07 2374 uint8_t *ptr;
791af8c8 2375 uint64_t val;
5c8a00ce 2376 MemoryRegion *mr;
149f54b5
PB
2377 hwaddr l = 4;
2378 hwaddr addr1;
8df1cd07 2379
fdfba1a2 2380 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2381 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2382 /* I/O case */
5c8a00ce 2383 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2384#if defined(TARGET_WORDS_BIGENDIAN)
2385 if (endian == DEVICE_LITTLE_ENDIAN) {
2386 val = bswap32(val);
2387 }
2388#else
2389 if (endian == DEVICE_BIG_ENDIAN) {
2390 val = bswap32(val);
2391 }
2392#endif
8df1cd07
FB
2393 } else {
2394 /* RAM case */
5c8a00ce 2395 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2396 & TARGET_PAGE_MASK)
149f54b5 2397 + addr1);
1e78bcc1
AG
2398 switch (endian) {
2399 case DEVICE_LITTLE_ENDIAN:
2400 val = ldl_le_p(ptr);
2401 break;
2402 case DEVICE_BIG_ENDIAN:
2403 val = ldl_be_p(ptr);
2404 break;
2405 default:
2406 val = ldl_p(ptr);
2407 break;
2408 }
8df1cd07
FB
2409 }
2410 return val;
2411}
2412
fdfba1a2 2413uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2414{
fdfba1a2 2415 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2416}
2417
fdfba1a2 2418uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2419{
fdfba1a2 2420 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2421}
2422
fdfba1a2 2423uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2424{
fdfba1a2 2425 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2426}
2427
84b7b8e7 2428/* warning: addr must be aligned */
2c17449b 2429static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2430 enum device_endian endian)
84b7b8e7 2431{
84b7b8e7
FB
2432 uint8_t *ptr;
2433 uint64_t val;
5c8a00ce 2434 MemoryRegion *mr;
149f54b5
PB
2435 hwaddr l = 8;
2436 hwaddr addr1;
84b7b8e7 2437
2c17449b 2438 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2439 false);
2440 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2441 /* I/O case */
5c8a00ce 2442 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2443#if defined(TARGET_WORDS_BIGENDIAN)
2444 if (endian == DEVICE_LITTLE_ENDIAN) {
2445 val = bswap64(val);
2446 }
2447#else
2448 if (endian == DEVICE_BIG_ENDIAN) {
2449 val = bswap64(val);
2450 }
84b7b8e7
FB
2451#endif
2452 } else {
2453 /* RAM case */
5c8a00ce 2454 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2455 & TARGET_PAGE_MASK)
149f54b5 2456 + addr1);
1e78bcc1
AG
2457 switch (endian) {
2458 case DEVICE_LITTLE_ENDIAN:
2459 val = ldq_le_p(ptr);
2460 break;
2461 case DEVICE_BIG_ENDIAN:
2462 val = ldq_be_p(ptr);
2463 break;
2464 default:
2465 val = ldq_p(ptr);
2466 break;
2467 }
84b7b8e7
FB
2468 }
2469 return val;
2470}
2471
2c17449b 2472uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2473{
2c17449b 2474 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2475}
2476
2c17449b 2477uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2478{
2c17449b 2479 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2480}
2481
2c17449b 2482uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2483{
2c17449b 2484 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2485}
2486
aab33094 2487/* XXX: optimize */
2c17449b 2488uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2489{
2490 uint8_t val;
2c17449b 2491 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2492 return val;
2493}
2494
733f0b02 2495/* warning: addr must be aligned */
41701aa4 2496static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2497 enum device_endian endian)
aab33094 2498{
733f0b02
MT
2499 uint8_t *ptr;
2500 uint64_t val;
5c8a00ce 2501 MemoryRegion *mr;
149f54b5
PB
2502 hwaddr l = 2;
2503 hwaddr addr1;
733f0b02 2504
41701aa4 2505 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2506 false);
2507 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2508 /* I/O case */
5c8a00ce 2509 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2510#if defined(TARGET_WORDS_BIGENDIAN)
2511 if (endian == DEVICE_LITTLE_ENDIAN) {
2512 val = bswap16(val);
2513 }
2514#else
2515 if (endian == DEVICE_BIG_ENDIAN) {
2516 val = bswap16(val);
2517 }
2518#endif
733f0b02
MT
2519 } else {
2520 /* RAM case */
5c8a00ce 2521 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2522 & TARGET_PAGE_MASK)
149f54b5 2523 + addr1);
1e78bcc1
AG
2524 switch (endian) {
2525 case DEVICE_LITTLE_ENDIAN:
2526 val = lduw_le_p(ptr);
2527 break;
2528 case DEVICE_BIG_ENDIAN:
2529 val = lduw_be_p(ptr);
2530 break;
2531 default:
2532 val = lduw_p(ptr);
2533 break;
2534 }
733f0b02
MT
2535 }
2536 return val;
aab33094
FB
2537}
2538
41701aa4 2539uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2540{
41701aa4 2541 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2542}
2543
41701aa4 2544uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2545{
41701aa4 2546 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2547}
2548
41701aa4 2549uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2550{
41701aa4 2551 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2552}
2553
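/* Illustrative sketch (not part of exec.c): in-memory descriptors and device
 * registers usually have a fixed endianness, so callers pick the explicit
 * _le_/_be_ variants above instead of the target-endian default.  The field
 * offsets and desc_gpa are hypothetical.
 */
static uint64_t example_read_le_desc(AddressSpace *as, hwaddr desc_gpa)
{
    uint64_t buf_addr = ldq_le_phys(as, desc_gpa + 0);   /* 64-bit LE field */
    uint32_t flags    = lduw_le_phys(as, desc_gpa + 12); /* 16-bit LE field */

    return (flags & 1) ? buf_addr : 0;
}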
8df1cd07
FB
 2554/* warning: addr must be aligned. The ram page is not marked as dirty
2555 and the code inside is not invalidated. It is useful if the dirty
2556 bits are used to track modified PTEs */
2198a121 2557void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2558{
8df1cd07 2559 uint8_t *ptr;
5c8a00ce 2560 MemoryRegion *mr;
149f54b5
PB
2561 hwaddr l = 4;
2562 hwaddr addr1;
8df1cd07 2563
2198a121 2564 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2565 true);
2566 if (l < 4 || !memory_access_is_direct(mr, true)) {
2567 io_mem_write(mr, addr1, val, 4);
8df1cd07 2568 } else {
5c8a00ce 2569 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2570 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2571 stl_p(ptr, val);
74576198
AL
2572
2573 if (unlikely(in_migration)) {
a2cd8c85 2574 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2575 /* invalidate code */
2576 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2577 /* set dirty bit */
52159192
JQ
2578 cpu_physical_memory_set_dirty_flag(addr1,
2579 DIRTY_MEMORY_MIGRATION);
2580 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2581 }
2582 }
8df1cd07
FB
2583 }
2584}
2585
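/* Illustrative sketch (not part of exec.c): target MMU helpers are the main
 * users of stl_phys_notdirty() - they set accessed/dirty bits in a guest
 * PTE without marking the RAM page dirty or invalidating translated code.
 * pte_addr and the 0x20 "accessed" bit are hypothetical.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20);
}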
2586/* warning: addr must be aligned */
ab1da857
EI
2587static inline void stl_phys_internal(AddressSpace *as,
2588 hwaddr addr, uint32_t val,
1e78bcc1 2589 enum device_endian endian)
8df1cd07 2590{
8df1cd07 2591 uint8_t *ptr;
5c8a00ce 2592 MemoryRegion *mr;
149f54b5
PB
2593 hwaddr l = 4;
2594 hwaddr addr1;
8df1cd07 2595
ab1da857 2596 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2597 true);
2598 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2599#if defined(TARGET_WORDS_BIGENDIAN)
2600 if (endian == DEVICE_LITTLE_ENDIAN) {
2601 val = bswap32(val);
2602 }
2603#else
2604 if (endian == DEVICE_BIG_ENDIAN) {
2605 val = bswap32(val);
2606 }
2607#endif
5c8a00ce 2608 io_mem_write(mr, addr1, val, 4);
8df1cd07 2609 } else {
8df1cd07 2610 /* RAM case */
5c8a00ce 2611 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2612 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2613 switch (endian) {
2614 case DEVICE_LITTLE_ENDIAN:
2615 stl_le_p(ptr, val);
2616 break;
2617 case DEVICE_BIG_ENDIAN:
2618 stl_be_p(ptr, val);
2619 break;
2620 default:
2621 stl_p(ptr, val);
2622 break;
2623 }
51d7a9eb 2624 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2625 }
2626}
2627
ab1da857 2628void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2629{
ab1da857 2630 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2631}
2632
ab1da857 2633void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2634{
ab1da857 2635 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2636}
2637
ab1da857 2638void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2639{
ab1da857 2640 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2641}
2642
aab33094 2643/* XXX: optimize */
db3be60d 2644void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2645{
2646 uint8_t v = val;
db3be60d 2647 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2648}
2649
733f0b02 2650/* warning: addr must be aligned */
5ce5944d
EI
2651static inline void stw_phys_internal(AddressSpace *as,
2652 hwaddr addr, uint32_t val,
1e78bcc1 2653 enum device_endian endian)
aab33094 2654{
733f0b02 2655 uint8_t *ptr;
5c8a00ce 2656 MemoryRegion *mr;
149f54b5
PB
2657 hwaddr l = 2;
2658 hwaddr addr1;
733f0b02 2659
5ce5944d 2660 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2661 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2662#if defined(TARGET_WORDS_BIGENDIAN)
2663 if (endian == DEVICE_LITTLE_ENDIAN) {
2664 val = bswap16(val);
2665 }
2666#else
2667 if (endian == DEVICE_BIG_ENDIAN) {
2668 val = bswap16(val);
2669 }
2670#endif
5c8a00ce 2671 io_mem_write(mr, addr1, val, 2);
733f0b02 2672 } else {
733f0b02 2673 /* RAM case */
5c8a00ce 2674 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2675 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2676 switch (endian) {
2677 case DEVICE_LITTLE_ENDIAN:
2678 stw_le_p(ptr, val);
2679 break;
2680 case DEVICE_BIG_ENDIAN:
2681 stw_be_p(ptr, val);
2682 break;
2683 default:
2684 stw_p(ptr, val);
2685 break;
2686 }
51d7a9eb 2687 invalidate_and_set_dirty(addr1, 2);
733f0b02 2688 }
aab33094
FB
2689}
2690
5ce5944d 2691void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2692{
5ce5944d 2693 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2694}
2695
5ce5944d 2696void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2697{
5ce5944d 2698 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2699}
2700
5ce5944d 2701void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2702{
5ce5944d 2703 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2704}
2705
aab33094 2706/* XXX: optimize */
f606604f 2707void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2708{
2709 val = tswap64(val);
f606604f 2710 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2711}
2712
f606604f 2713void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2714{
2715 val = cpu_to_le64(val);
f606604f 2716 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2717}
2718
f606604f 2719void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2720{
2721 val = cpu_to_be64(val);
f606604f 2722 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2723}
2724
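/* Illustrative sketch (not part of exec.c): the store helpers mirror the
 * loads; a device completing a request might publish a little-endian
 * timestamp and status word.  Offsets are hypothetical, and smp_wmb() (from
 * qemu/atomic.h) is assumed to be the right ordering primitive here.
 */
static void example_write_le_status(AddressSpace *as, hwaddr status_gpa,
                                    uint32_t status, uint64_t timestamp)
{
    stq_le_phys(as, status_gpa + 8, timestamp);
    smp_wmb();    /* make sure the payload is visible before the status */
    stl_le_phys(as, status_gpa + 0, status);
}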
5e2972fd 2725/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2726int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2727 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2728{
2729 int l;
a8170e5e 2730 hwaddr phys_addr;
9b3c35e0 2731 target_ulong page;
13eb76e0
FB
2732
2733 while (len > 0) {
2734 page = addr & TARGET_PAGE_MASK;
f17ec444 2735 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2736 /* if no physical page mapped, return an error */
2737 if (phys_addr == -1)
2738 return -1;
2739 l = (page + TARGET_PAGE_SIZE) - addr;
2740 if (l > len)
2741 l = len;
5e2972fd 2742 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2743 if (is_write) {
2744 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2745 } else {
2746 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2747 }
13eb76e0
FB
2748 len -= l;
2749 buf += l;
2750 addr += l;
2751 }
2752 return 0;
2753}
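/* Illustrative sketch (not part of exec.c): this is essentially what the
 * gdbstub/monitor do to peek at guest virtual memory.  example_peek_u32 and
 * its caller-supplied vaddr are hypothetical.
 */
static int example_peek_u32(CPUState *cpu, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);          /* reassemble the four raw bytes */
    return 0;
}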
a68fe89c 2754#endif
13eb76e0 2755
8e4a424b
BS
2756#if !defined(CONFIG_USER_ONLY)
2757
2758/*
2759 * A helper function for the _utterly broken_ virtio device model to find out if
2760 * it's running on a big endian machine. Don't do this at home kids!
2761 */
2762bool virtio_is_big_endian(void);
2763bool virtio_is_big_endian(void)
2764{
2765#if defined(TARGET_WORDS_BIGENDIAN)
2766 return true;
2767#else
2768 return false;
2769#endif
2770}
2771
2772#endif
2773
76f35538 2774#ifndef CONFIG_USER_ONLY
a8170e5e 2775bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2776{
5c8a00ce 2777    MemoryRegion *mr;
149f54b5 2778 hwaddr l = 1;
76f35538 2779
5c8a00ce
PB
2780 mr = address_space_translate(&address_space_memory,
2781 phys_addr, &phys_addr, &l, false);
76f35538 2782
5c8a00ce
PB
2783 return !(memory_region_is_ram(mr) ||
2784 memory_region_is_romd(mr));
76f35538 2785}
bd2fa51f
MH
2786
2787void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2788{
2789 RAMBlock *block;
2790
2791 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2792 func(block->host, block->offset, block->length, opaque);
2793 }
2794}
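/* Illustrative sketch (not part of exec.c): vhost-style code walks all RAM
 * blocks to build its memory tables.  The callback signature is inferred
 * from the func(block->host, block->offset, block->length, opaque) call
 * above; the byte-counting use is hypothetical.
 */
static void example_count_ram_block(void *host_addr, ram_addr_t offset,
                                    ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram_block, &total);
    return total;
}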
ec3f8c99 2795#endif