54936004 1/*
5b6dd868 2 * Virtual page mapping
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
777872e5 20#ifndef _WIN32
a98d49b1 21#include <sys/types.h>
d5a8f07c
FB
22#include <sys/mman.h>
23#endif
54936004 24
055403b2 25#include "qemu-common.h"
6180a181 26#include "cpu.h"
b67d9a52 27#include "tcg.h"
b3c7724c 28#include "hw/hw.h"
cc9e98cb 29#include "hw/qdev.h"
1de7afc9 30#include "qemu/osdep.h"
9c17d615 31#include "sysemu/kvm.h"
2ff3de68 32#include "sysemu/sysemu.h"
0d09e41a 33#include "hw/xen/xen.h"
1de7afc9
PB
34#include "qemu/timer.h"
35#include "qemu/config-file.h"
75a34036 36#include "qemu/error-report.h"
022c62cb 37#include "exec/memory.h"
9c17d615 38#include "sysemu/dma.h"
022c62cb 39#include "exec/address-spaces.h"
53a5960a
PB
40#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
432d268c 42#else /* !CONFIG_USER_ONLY */
9c17d615 43#include "sysemu/xen-mapcache.h"
6506e4f9 44#include "trace.h"
53a5960a 45#endif
0d6d3c87 46#include "exec/cpu-all.h"
54936004 47
022c62cb 48#include "exec/cputlb.h"
5b6dd868 49#include "translate-all.h"
0cac1b66 50
022c62cb 51#include "exec/memory-internal.h"
220c3ebd 52#include "exec/ram_addr.h"
67d95c15 53
b35ba30f
MT
54#include "qemu/range.h"
55
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
e2eef170 58#if !defined(CONFIG_USER_ONLY)
981fdf23 59static bool in_migration;
94a6b54f 60
a3161038 61RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
62
63static MemoryRegion *system_memory;
309cb471 64static MemoryRegion *system_io;
62152b8a 65
f6790af6
AK
66AddressSpace address_space_io;
67AddressSpace address_space_memory;
2673a5da 68
0844e007 69MemoryRegion io_mem_rom, io_mem_notdirty;
acc9d80b 70static MemoryRegion io_mem_unassigned;
0e0df1e2 71
7bd4f430
PB
72/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
73#define RAM_PREALLOC (1 << 0)
74
dbcb8981
PB
75/* RAM is mmap-ed with MAP_SHARED */
76#define RAM_SHARED (1 << 1)
77
e2eef170 78#endif
9fa3e853 79
bdc44640 80struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
6a00d601
FB
81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
4917cf44 83DEFINE_TLS(CPUState *, current_cpu);
2e70f6ef 84/* 0 = Do not count executed instructions.
bf20dc07 85 1 = Precise instruction counting.
2e70f6ef 86 2 = Adaptive rate instruction counting. */
5708fc66 87int use_icount;
6a00d601 88
e2eef170 89#if !defined(CONFIG_USER_ONLY)
4346ae3e 90
1db8abb1
PB
91typedef struct PhysPageEntry PhysPageEntry;
92
93struct PhysPageEntry {
9736e55b 94 /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
8b795765 95 uint32_t skip : 6;
9736e55b 96 /* index into phys_sections (!skip) or phys_map_nodes (skip) */
8b795765 97 uint32_t ptr : 26;
1db8abb1
PB
98};
99
8b795765
MT
100#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
101
03f49957 102/* Size of the L2 (and L3, etc) page tables. */
57271d63 103#define ADDR_SPACE_BITS 64
03f49957 104
026736ce 105#define P_L2_BITS 9
03f49957
PB
106#define P_L2_SIZE (1 << P_L2_BITS)
107
108#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
109
110typedef PhysPageEntry Node[P_L2_SIZE];
0475d94f 111
53cb28cb
MA
112typedef struct PhysPageMap {
113 unsigned sections_nb;
114 unsigned sections_nb_alloc;
115 unsigned nodes_nb;
116 unsigned nodes_nb_alloc;
117 Node *nodes;
118 MemoryRegionSection *sections;
119} PhysPageMap;
120
1db8abb1
PB
121struct AddressSpaceDispatch {
122 /* This is a multi-level map on the physical address space.
123 * The bottom level has pointers to MemoryRegionSections.
124 */
125 PhysPageEntry phys_map;
53cb28cb 126 PhysPageMap map;
acc9d80b 127 AddressSpace *as;
1db8abb1
PB
128};
129
90260c6c
JK
130#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
131typedef struct subpage_t {
132 MemoryRegion iomem;
acc9d80b 133 AddressSpace *as;
90260c6c
JK
134 hwaddr base;
135 uint16_t sub_section[TARGET_PAGE_SIZE];
136} subpage_t;
137
b41aac4f
LPF
138#define PHYS_SECTION_UNASSIGNED 0
139#define PHYS_SECTION_NOTDIRTY 1
140#define PHYS_SECTION_ROM 2
141#define PHYS_SECTION_WATCH 3
5312bd8b 142
e2eef170 143static void io_mem_init(void);
62152b8a 144static void memory_map_init(void);
09daed84 145static void tcg_commit(MemoryListener *listener);
e2eef170 146
1ec9b909 147static MemoryRegion io_mem_watch;
6658ffb8 148#endif
fd6ce8f6 149
6d9a1304 150#if !defined(CONFIG_USER_ONLY)
d6f2ea22 151
53cb28cb 152static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
d6f2ea22 153{
53cb28cb
MA
154 if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
155 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
156 map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
157 map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
d6f2ea22 158 }
f7bf5461
AK
159}
160
53cb28cb 161static uint32_t phys_map_node_alloc(PhysPageMap *map)
f7bf5461
AK
162{
163 unsigned i;
8b795765 164 uint32_t ret;
f7bf5461 165
53cb28cb 166 ret = map->nodes_nb++;
f7bf5461 167 assert(ret != PHYS_MAP_NODE_NIL);
53cb28cb 168 assert(ret != map->nodes_nb_alloc);
03f49957 169 for (i = 0; i < P_L2_SIZE; ++i) {
53cb28cb
MA
170 map->nodes[ret][i].skip = 1;
171 map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 172 }
f7bf5461 173 return ret;
d6f2ea22
AK
174}
175
53cb28cb
MA
176static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
177 hwaddr *index, hwaddr *nb, uint16_t leaf,
2999097b 178 int level)
f7bf5461
AK
179{
180 PhysPageEntry *p;
181 int i;
03f49957 182 hwaddr step = (hwaddr)1 << (level * P_L2_BITS);
108c49b8 183
9736e55b 184 if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
53cb28cb
MA
185 lp->ptr = phys_map_node_alloc(map);
186 p = map->nodes[lp->ptr];
f7bf5461 187 if (level == 0) {
03f49957 188 for (i = 0; i < P_L2_SIZE; i++) {
9736e55b 189 p[i].skip = 0;
b41aac4f 190 p[i].ptr = PHYS_SECTION_UNASSIGNED;
4346ae3e 191 }
67c4d23c 192 }
f7bf5461 193 } else {
53cb28cb 194 p = map->nodes[lp->ptr];
92e873b9 195 }
03f49957 196 lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
f7bf5461 197
03f49957 198 while (*nb && lp < &p[P_L2_SIZE]) {
07f07b31 199 if ((*index & (step - 1)) == 0 && *nb >= step) {
9736e55b 200 lp->skip = 0;
c19e8800 201 lp->ptr = leaf;
07f07b31
AK
202 *index += step;
203 *nb -= step;
2999097b 204 } else {
53cb28cb 205 phys_page_set_level(map, lp, index, nb, leaf, level - 1);
2999097b
AK
206 }
207 ++lp;
f7bf5461
AK
208 }
209}
210
ac1970fb 211static void phys_page_set(AddressSpaceDispatch *d,
a8170e5e 212 hwaddr index, hwaddr nb,
2999097b 213 uint16_t leaf)
f7bf5461 214{
2999097b 215 /* Wildly overreserve - it doesn't matter much. */
53cb28cb 216 phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
5cd2c5b6 217
53cb28cb 218 phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
219}
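/* Illustrative sketch (not part of the original file): phys_page_set_level()
 * above and phys_page_find() below select the slot at each level of the
 * multi-level map with the same expression; the helper name here is
 * hypothetical.  With ADDR_SPACE_BITS == 64, 4 KiB pages and P_L2_BITS == 9,
 * P_L2_LEVELS works out to ((64 - 12 - 1) / 9) + 1 == 6, so a walk starts at
 * level P_L2_LEVELS - 1 == 5 and consumes 9 index bits per level. */
static inline unsigned phys_page_slot(hwaddr index, int level)
{
    return (index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1);
}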
220
b35ba30f
MT
221/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
222 * and update our entry so we can skip it and go directly to the destination.
223 */
224static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
225{
226 unsigned valid_ptr = P_L2_SIZE;
227 int valid = 0;
228 PhysPageEntry *p;
229 int i;
230
231 if (lp->ptr == PHYS_MAP_NODE_NIL) {
232 return;
233 }
234
235 p = nodes[lp->ptr];
236 for (i = 0; i < P_L2_SIZE; i++) {
237 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
238 continue;
239 }
240
241 valid_ptr = i;
242 valid++;
243 if (p[i].skip) {
244 phys_page_compact(&p[i], nodes, compacted);
245 }
246 }
247
248 /* We can only compress if there's only one child. */
249 if (valid != 1) {
250 return;
251 }
252
253 assert(valid_ptr < P_L2_SIZE);
254
255 /* Don't compress if it won't fit in the # of bits we have. */
256 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
257 return;
258 }
259
260 lp->ptr = p[valid_ptr].ptr;
261 if (!p[valid_ptr].skip) {
262 /* If our only child is a leaf, make this a leaf. */
263 /* By design, we should have made this node a leaf to begin with so we
264 * should never reach here.
265 * But since it's so simple to handle this, let's do it just in case we
266 * change this rule.
267 */
268 lp->skip = 0;
269 } else {
270 lp->skip += p[valid_ptr].skip;
271 }
272}
273
274static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
275{
276 DECLARE_BITMAP(compacted, nodes_nb);
277
278 if (d->phys_map.skip) {
53cb28cb 279 phys_page_compact(&d->phys_map, d->map.nodes, compacted);
b35ba30f
MT
280 }
281}
282
97115a8d 283static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
9affd6fc 284 Node *nodes, MemoryRegionSection *sections)
92e873b9 285{
31ab2b4a 286 PhysPageEntry *p;
97115a8d 287 hwaddr index = addr >> TARGET_PAGE_BITS;
31ab2b4a 288 int i;
f1f6e3b8 289
9736e55b 290 for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
c19e8800 291 if (lp.ptr == PHYS_MAP_NODE_NIL) {
9affd6fc 292 return &sections[PHYS_SECTION_UNASSIGNED];
31ab2b4a 293 }
9affd6fc 294 p = nodes[lp.ptr];
03f49957 295 lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
5312bd8b 296 }
b35ba30f
MT
297
298 if (sections[lp.ptr].size.hi ||
299 range_covers_byte(sections[lp.ptr].offset_within_address_space,
300 sections[lp.ptr].size.lo, addr)) {
301 return &sections[lp.ptr];
302 } else {
303 return &sections[PHYS_SECTION_UNASSIGNED];
304 }
f3705d53
AK
305}
306
e5548617
BS
307bool memory_region_is_unassigned(MemoryRegion *mr)
308{
2a8e7499 309 return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
5b6dd868 310 && mr != &io_mem_watch;
fd6ce8f6 311}
149f54b5 312
c7086b4a 313static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
90260c6c
JK
314 hwaddr addr,
315 bool resolve_subpage)
9f029603 316{
90260c6c
JK
317 MemoryRegionSection *section;
318 subpage_t *subpage;
319
53cb28cb 320 section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
90260c6c
JK
321 if (resolve_subpage && section->mr->subpage) {
322 subpage = container_of(section->mr, subpage_t, iomem);
53cb28cb 323 section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
90260c6c
JK
324 }
325 return section;
9f029603
JK
326}
327
90260c6c 328static MemoryRegionSection *
c7086b4a 329address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
90260c6c 330 hwaddr *plen, bool resolve_subpage)
149f54b5
PB
331{
332 MemoryRegionSection *section;
a87f3954 333 Int128 diff;
149f54b5 334
c7086b4a 335 section = address_space_lookup_region(d, addr, resolve_subpage);
149f54b5
PB
336 /* Compute offset within MemoryRegionSection */
337 addr -= section->offset_within_address_space;
338
339 /* Compute offset within MemoryRegion */
340 *xlat = addr + section->offset_within_region;
341
342 diff = int128_sub(section->mr->size, int128_make64(addr));
3752a036 343 *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
149f54b5
PB
344 return section;
345}
90260c6c 346
a87f3954
PB
347static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
348{
349 if (memory_region_is_ram(mr)) {
350 return !(is_write && mr->readonly);
351 }
352 if (memory_region_is_romd(mr)) {
353 return !is_write;
354 }
355
356 return false;
357}
358
5c8a00ce
PB
359MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
360 hwaddr *xlat, hwaddr *plen,
361 bool is_write)
90260c6c 362{
30951157
AK
363 IOMMUTLBEntry iotlb;
364 MemoryRegionSection *section;
365 MemoryRegion *mr;
366 hwaddr len = *plen;
367
368 for (;;) {
a87f3954 369 section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
30951157
AK
370 mr = section->mr;
371
372 if (!mr->iommu_ops) {
373 break;
374 }
375
376 iotlb = mr->iommu_ops->translate(mr, addr);
377 addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
378 | (addr & iotlb.addr_mask));
379 len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
380 if (!(iotlb.perm & (1 << is_write))) {
381 mr = &io_mem_unassigned;
382 break;
383 }
384
385 as = iotlb.target_as;
386 }
387
fe680d0d 388 if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
a87f3954
PB
389 hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
390 len = MIN(page, len);
391 }
392
30951157
AK
393 *plen = len;
394 *xlat = addr;
395 return mr;
90260c6c
JK
396}
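/* Hypothetical usage sketch (not from the original file): callers such as the
 * address_space_rw() path translate first, then use memory_access_is_direct()
 * to decide whether the access can touch RAM directly or must go through the
 * MMIO dispatch, roughly as follows. */
static inline bool sketch_translate_is_direct(AddressSpace *as, hwaddr addr,
                                              hwaddr *xlat, hwaddr *plen,
                                              bool is_write)
{
    MemoryRegion *mr = address_space_translate(as, addr, xlat, plen, is_write);

    return memory_access_is_direct(mr, is_write);
}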
397
398MemoryRegionSection *
399address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
400 hwaddr *plen)
401{
30951157 402 MemoryRegionSection *section;
c7086b4a 403 section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
30951157
AK
404
405 assert(!section->mr->iommu_ops);
406 return section;
90260c6c 407}
5b6dd868 408#endif
fd6ce8f6 409
5b6dd868 410void cpu_exec_init_all(void)
fdbb84d1 411{
5b6dd868 412#if !defined(CONFIG_USER_ONLY)
b2a8658e 413 qemu_mutex_init(&ram_list.mutex);
5b6dd868
BS
414 memory_map_init();
415 io_mem_init();
fdbb84d1 416#endif
5b6dd868 417}
fdbb84d1 418
b170fce3 419#if !defined(CONFIG_USER_ONLY)
5b6dd868
BS
420
421static int cpu_common_post_load(void *opaque, int version_id)
fd6ce8f6 422{
259186a7 423 CPUState *cpu = opaque;
a513fe19 424
5b6dd868
BS
425 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
426 version_id is increased. */
259186a7 427 cpu->interrupt_request &= ~0x01;
c01a71c1 428 tlb_flush(cpu, 1);
5b6dd868
BS
429
430 return 0;
a513fe19 431}
7501267e 432
1a1562f5 433const VMStateDescription vmstate_cpu_common = {
5b6dd868
BS
434 .name = "cpu_common",
435 .version_id = 1,
436 .minimum_version_id = 1,
5b6dd868 437 .post_load = cpu_common_post_load,
35d08458 438 .fields = (VMStateField[]) {
259186a7
AF
439 VMSTATE_UINT32(halted, CPUState),
440 VMSTATE_UINT32(interrupt_request, CPUState),
5b6dd868
BS
441 VMSTATE_END_OF_LIST()
442 }
443};
1a1562f5 444
5b6dd868 445#endif
ea041c0e 446
38d8f5c8 447CPUState *qemu_get_cpu(int index)
ea041c0e 448{
bdc44640 449 CPUState *cpu;
ea041c0e 450
bdc44640 451 CPU_FOREACH(cpu) {
55e5c285 452 if (cpu->cpu_index == index) {
bdc44640 453 return cpu;
55e5c285 454 }
ea041c0e 455 }
5b6dd868 456
bdc44640 457 return NULL;
ea041c0e
FB
458}
459
09daed84
EI
460#if !defined(CONFIG_USER_ONLY)
461void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
462{
463 /* We only support one address space per cpu at the moment. */
464 assert(cpu->as == as);
465
466 if (cpu->tcg_as_listener) {
467 memory_listener_unregister(cpu->tcg_as_listener);
468 } else {
469 cpu->tcg_as_listener = g_new0(MemoryListener, 1);
470 }
471 cpu->tcg_as_listener->commit = tcg_commit;
472 memory_listener_register(cpu->tcg_as_listener, as);
473}
474#endif
475
5b6dd868 476void cpu_exec_init(CPUArchState *env)
ea041c0e 477{
5b6dd868 478 CPUState *cpu = ENV_GET_CPU(env);
b170fce3 479 CPUClass *cc = CPU_GET_CLASS(cpu);
bdc44640 480 CPUState *some_cpu;
5b6dd868
BS
481 int cpu_index;
482
483#if defined(CONFIG_USER_ONLY)
484 cpu_list_lock();
485#endif
5b6dd868 486 cpu_index = 0;
bdc44640 487 CPU_FOREACH(some_cpu) {
5b6dd868
BS
488 cpu_index++;
489 }
55e5c285 490 cpu->cpu_index = cpu_index;
1b1ed8dc 491 cpu->numa_node = 0;
f0c3c505 492 QTAILQ_INIT(&cpu->breakpoints);
ff4700b0 493 QTAILQ_INIT(&cpu->watchpoints);
5b6dd868 494#ifndef CONFIG_USER_ONLY
09daed84 495 cpu->as = &address_space_memory;
5b6dd868
BS
496 cpu->thread_id = qemu_get_thread_id();
497#endif
bdc44640 498 QTAILQ_INSERT_TAIL(&cpus, cpu, node);
5b6dd868
BS
499#if defined(CONFIG_USER_ONLY)
500 cpu_list_unlock();
501#endif
e0d47944
AF
502 if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
503 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
504 }
5b6dd868 505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
5b6dd868
BS
506 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
507 cpu_save, cpu_load, env);
b170fce3 508 assert(cc->vmsd == NULL);
e0d47944 509 assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
5b6dd868 510#endif
b170fce3
AF
511 if (cc->vmsd != NULL) {
512 vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
513 }
ea041c0e
FB
514}
515
1fddef4b 516#if defined(TARGET_HAS_ICE)
94df27fd 517#if defined(CONFIG_USER_ONLY)
00b941e5 518static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
94df27fd
PB
519{
520 tb_invalidate_phys_page_range(pc, pc + 1, 0);
521}
522#else
00b941e5 523static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
1e7855a5 524{
e8262a1b
MF
525 hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
526 if (phys != -1) {
09daed84 527 tb_invalidate_phys_addr(cpu->as,
29d8ec7b 528 phys | (pc & ~TARGET_PAGE_MASK));
e8262a1b 529 }
1e7855a5 530}
c27004ec 531#endif
94df27fd 532#endif /* TARGET_HAS_ICE */
d720b93d 533
c527ee8f 534#if defined(CONFIG_USER_ONLY)
75a34036 535void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
c527ee8f
PB
536
537{
538}
539
75a34036 540int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
c527ee8f
PB
541 int flags, CPUWatchpoint **watchpoint)
542{
543 return -ENOSYS;
544}
545#else
6658ffb8 546/* Add a watchpoint. */
75a34036 547int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 548 int flags, CPUWatchpoint **watchpoint)
6658ffb8 549{
75a34036 550 vaddr len_mask = ~(len - 1);
c0ce998e 551 CPUWatchpoint *wp;
6658ffb8 552
b4051334 553 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
554 if ((len & (len - 1)) || (addr & ~len_mask) ||
555 len == 0 || len > TARGET_PAGE_SIZE) {
75a34036
AF
556 error_report("tried to set invalid watchpoint at %"
557 VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
b4051334
AL
558 return -EINVAL;
559 }
7267c094 560 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
561
562 wp->vaddr = addr;
b4051334 563 wp->len_mask = len_mask;
a1d1bb31
AL
564 wp->flags = flags;
565
2dc9f411 566 /* keep all GDB-injected watchpoints in front */
ff4700b0
AF
567 if (flags & BP_GDB) {
568 QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
569 } else {
570 QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
571 }
6658ffb8 572
31b030d4 573 tlb_flush_page(cpu, addr);
a1d1bb31
AL
574
575 if (watchpoint)
576 *watchpoint = wp;
577 return 0;
6658ffb8
PB
578}
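/* Hypothetical usage sketch (not from the original file): a debug front end
 * such as the gdbstub would typically request a 4-byte write watchpoint like
 * this; the length must be a power of two and the address aligned to it, as
 * checked above. */
static inline int sketch_set_write_watchpoint(CPUState *cpu, vaddr addr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(cpu, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
}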
579
a1d1bb31 580/* Remove a specific watchpoint. */
75a34036 581int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
a1d1bb31 582 int flags)
6658ffb8 583{
75a34036 584 vaddr len_mask = ~(len - 1);
a1d1bb31 585 CPUWatchpoint *wp;
6658ffb8 586
ff4700b0 587 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334 588 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 589 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
75a34036 590 cpu_watchpoint_remove_by_ref(cpu, wp);
6658ffb8
PB
591 return 0;
592 }
593 }
a1d1bb31 594 return -ENOENT;
6658ffb8
PB
595}
596
a1d1bb31 597/* Remove a specific watchpoint by reference. */
75a34036 598void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
a1d1bb31 599{
ff4700b0 600 QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
7d03f82f 601
31b030d4 602 tlb_flush_page(cpu, watchpoint->vaddr);
a1d1bb31 603
7267c094 604 g_free(watchpoint);
a1d1bb31
AL
605}
606
607/* Remove all matching watchpoints. */
75a34036 608void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31 609{
c0ce998e 610 CPUWatchpoint *wp, *next;
a1d1bb31 611
ff4700b0 612 QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
75a34036
AF
613 if (wp->flags & mask) {
614 cpu_watchpoint_remove_by_ref(cpu, wp);
615 }
c0ce998e 616 }
7d03f82f 617}
c527ee8f 618#endif
7d03f82f 619
a1d1bb31 620/* Add a breakpoint. */
b3310ab3 621int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
a1d1bb31 622 CPUBreakpoint **breakpoint)
4c3a88a2 623{
1fddef4b 624#if defined(TARGET_HAS_ICE)
c0ce998e 625 CPUBreakpoint *bp;
3b46e624 626
7267c094 627 bp = g_malloc(sizeof(*bp));
4c3a88a2 628
a1d1bb31
AL
629 bp->pc = pc;
630 bp->flags = flags;
631
2dc9f411 632 /* keep all GDB-injected breakpoints in front */
00b941e5 633 if (flags & BP_GDB) {
f0c3c505 634 QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
00b941e5 635 } else {
f0c3c505 636 QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
00b941e5 637 }
3b46e624 638
f0c3c505 639 breakpoint_invalidate(cpu, pc);
a1d1bb31 640
00b941e5 641 if (breakpoint) {
a1d1bb31 642 *breakpoint = bp;
00b941e5 643 }
4c3a88a2
FB
644 return 0;
645#else
a1d1bb31 646 return -ENOSYS;
4c3a88a2
FB
647#endif
648}
649
a1d1bb31 650/* Remove a specific breakpoint. */
b3310ab3 651int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
a1d1bb31 652{
7d03f82f 653#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
654 CPUBreakpoint *bp;
655
f0c3c505 656 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
a1d1bb31 657 if (bp->pc == pc && bp->flags == flags) {
b3310ab3 658 cpu_breakpoint_remove_by_ref(cpu, bp);
a1d1bb31
AL
659 return 0;
660 }
7d03f82f 661 }
a1d1bb31
AL
662 return -ENOENT;
663#else
664 return -ENOSYS;
7d03f82f
EI
665#endif
666}
667
a1d1bb31 668/* Remove a specific breakpoint by reference. */
b3310ab3 669void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
4c3a88a2 670{
1fddef4b 671#if defined(TARGET_HAS_ICE)
f0c3c505
AF
672 QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);
673
674 breakpoint_invalidate(cpu, breakpoint->pc);
a1d1bb31 675
7267c094 676 g_free(breakpoint);
a1d1bb31
AL
677#endif
678}
679
680/* Remove all matching breakpoints. */
b3310ab3 681void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
a1d1bb31
AL
682{
683#if defined(TARGET_HAS_ICE)
c0ce998e 684 CPUBreakpoint *bp, *next;
a1d1bb31 685
f0c3c505 686 QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
b3310ab3
AF
687 if (bp->flags & mask) {
688 cpu_breakpoint_remove_by_ref(cpu, bp);
689 }
c0ce998e 690 }
4c3a88a2
FB
691#endif
692}
693
c33a346e
FB
694/* enable or disable single step mode. EXCP_DEBUG is returned by the
695 CPU loop after each instruction */
3825b28f 696void cpu_single_step(CPUState *cpu, int enabled)
c33a346e 697{
1fddef4b 698#if defined(TARGET_HAS_ICE)
ed2803da
AF
699 if (cpu->singlestep_enabled != enabled) {
700 cpu->singlestep_enabled = enabled;
701 if (kvm_enabled()) {
38e478ec 702 kvm_update_guest_debug(cpu, 0);
ed2803da 703 } else {
ccbb4d44 704 /* must flush all the translated code to avoid inconsistencies */
e22a25c9 705 /* XXX: only flush what is necessary */
38e478ec 706 CPUArchState *env = cpu->env_ptr;
e22a25c9
AL
707 tb_flush(env);
708 }
c33a346e
FB
709 }
710#endif
711}
712
a47dddd7 713void cpu_abort(CPUState *cpu, const char *fmt, ...)
7501267e
FB
714{
715 va_list ap;
493ae1f0 716 va_list ap2;
7501267e
FB
717
718 va_start(ap, fmt);
493ae1f0 719 va_copy(ap2, ap);
7501267e
FB
720 fprintf(stderr, "qemu: fatal: ");
721 vfprintf(stderr, fmt, ap);
722 fprintf(stderr, "\n");
878096ee 723 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
724 if (qemu_log_enabled()) {
725 qemu_log("qemu: fatal: ");
726 qemu_log_vprintf(fmt, ap2);
727 qemu_log("\n");
a0762859 728 log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 729 qemu_log_flush();
93fcfe39 730 qemu_log_close();
924edcae 731 }
493ae1f0 732 va_end(ap2);
f9373291 733 va_end(ap);
fd052bf6
RV
734#if defined(CONFIG_USER_ONLY)
735 {
736 struct sigaction act;
737 sigfillset(&act.sa_mask);
738 act.sa_handler = SIG_DFL;
739 sigaction(SIGABRT, &act, NULL);
740 }
741#endif
7501267e
FB
742 abort();
743}
744
0124311e 745#if !defined(CONFIG_USER_ONLY)
041603fe
PB
746static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
747{
748 RAMBlock *block;
749
750 /* The list is protected by the iothread lock here. */
751 block = ram_list.mru_block;
752 if (block && addr - block->offset < block->length) {
753 goto found;
754 }
755 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
756 if (addr - block->offset < block->length) {
757 goto found;
758 }
759 }
760
761 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
762 abort();
763
764found:
765 ram_list.mru_block = block;
766 return block;
767}
768
a2f4d5be 769static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
d24981d3 770{
041603fe 771 ram_addr_t start1;
a2f4d5be
JQ
772 RAMBlock *block;
773 ram_addr_t end;
774
775 end = TARGET_PAGE_ALIGN(start + length);
776 start &= TARGET_PAGE_MASK;
d24981d3 777
041603fe
PB
778 block = qemu_get_ram_block(start);
779 assert(block == qemu_get_ram_block(end - 1));
780 start1 = (uintptr_t)block->host + (start - block->offset);
781 cpu_tlb_reset_dirty_all(start1, length);
d24981d3
JQ
782}
783
5579c7f3 784/* Note: start and end must be within the same ram block. */
a2f4d5be 785void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
52159192 786 unsigned client)
1ccde1cb 787{
1ccde1cb
FB
788 if (length == 0)
789 return;
ace694cc 790 cpu_physical_memory_clear_dirty_range(start, length, client);
f23db169 791
d24981d3 792 if (tcg_enabled()) {
a2f4d5be 793 tlb_reset_dirty_range_all(start, length);
5579c7f3 794 }
1ccde1cb
FB
795}
796
981fdf23 797static void cpu_physical_memory_set_dirty_tracking(bool enable)
74576198
AL
798{
799 in_migration = enable;
74576198
AL
800}
801
bb0e627a 802hwaddr memory_region_section_get_iotlb(CPUState *cpu,
149f54b5
PB
803 MemoryRegionSection *section,
804 target_ulong vaddr,
805 hwaddr paddr, hwaddr xlat,
806 int prot,
807 target_ulong *address)
e5548617 808{
a8170e5e 809 hwaddr iotlb;
e5548617
BS
810 CPUWatchpoint *wp;
811
cc5bea60 812 if (memory_region_is_ram(section->mr)) {
e5548617
BS
813 /* Normal RAM. */
814 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
149f54b5 815 + xlat;
e5548617 816 if (!section->readonly) {
b41aac4f 817 iotlb |= PHYS_SECTION_NOTDIRTY;
e5548617 818 } else {
b41aac4f 819 iotlb |= PHYS_SECTION_ROM;
e5548617
BS
820 }
821 } else {
1b3fb98f 822 iotlb = section - section->address_space->dispatch->map.sections;
149f54b5 823 iotlb += xlat;
e5548617
BS
824 }
825
826 /* Make accesses to pages with watchpoints go via the
827 watchpoint trap routines. */
ff4700b0 828 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
e5548617
BS
829 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
830 /* Avoid trapping reads of pages with a write breakpoint. */
831 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
b41aac4f 832 iotlb = PHYS_SECTION_WATCH + paddr;
e5548617
BS
833 *address |= TLB_MMIO;
834 break;
835 }
836 }
837 }
838
839 return iotlb;
840}
9fa3e853
FB
841#endif /* defined(CONFIG_USER_ONLY) */
842
e2eef170 843#if !defined(CONFIG_USER_ONLY)
8da3ff18 844
c227f099 845static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 846 uint16_t section);
acc9d80b 847static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
54688b1e 848
575ddeb4 849static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
91138037
MA
850
851/*
852 * Set a custom physical guest memory allocator.
853 * Accelerators with unusual needs may need this. Hopefully, we can
854 * get rid of it eventually.
855 */
575ddeb4 856void phys_mem_set_alloc(void *(*alloc)(size_t))
91138037
MA
857{
858 phys_mem_alloc = alloc;
859}
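/* Hypothetical sketch of how an accelerator could use the hook above: supply
 * its own allocator (here it simply defers to the default anonymous-RAM
 * allocator) and install it during initialization with
 * phys_mem_set_alloc(sketch_accel_ram_alloc).  The names are illustrative. */
static inline void *sketch_accel_ram_alloc(size_t size)
{
    /* an accelerator with special placement requirements would allocate here */
    return qemu_anon_ram_alloc(size);
}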
860
53cb28cb
MA
861static uint16_t phys_section_add(PhysPageMap *map,
862 MemoryRegionSection *section)
5312bd8b 863{
68f3f65b
PB
864 /* The physical section number is ORed with a page-aligned
865 * pointer to produce the iotlb entries. Thus it should
866 * never overflow into the page-aligned value.
867 */
53cb28cb 868 assert(map->sections_nb < TARGET_PAGE_SIZE);
68f3f65b 869
53cb28cb
MA
870 if (map->sections_nb == map->sections_nb_alloc) {
871 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
872 map->sections = g_renew(MemoryRegionSection, map->sections,
873 map->sections_nb_alloc);
5312bd8b 874 }
53cb28cb 875 map->sections[map->sections_nb] = *section;
dfde4e6e 876 memory_region_ref(section->mr);
53cb28cb 877 return map->sections_nb++;
5312bd8b
AK
878}
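/* Minimal sketch of the encoding described above: because sections_nb stays
 * below TARGET_PAGE_SIZE, the section number occupies only the sub-page bits
 * of an iotlb value, and iotlb_to_region() later in this file recovers it
 * with a mask.  The helper name is hypothetical. */
static inline unsigned sketch_iotlb_to_section_idx(hwaddr iotlb)
{
    return iotlb & ~TARGET_PAGE_MASK;
}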
879
058bc4b5
PB
880static void phys_section_destroy(MemoryRegion *mr)
881{
dfde4e6e
PB
882 memory_region_unref(mr);
883
058bc4b5
PB
884 if (mr->subpage) {
885 subpage_t *subpage = container_of(mr, subpage_t, iomem);
886 memory_region_destroy(&subpage->iomem);
887 g_free(subpage);
888 }
889}
890
6092666e 891static void phys_sections_free(PhysPageMap *map)
5312bd8b 892{
9affd6fc
PB
893 while (map->sections_nb > 0) {
894 MemoryRegionSection *section = &map->sections[--map->sections_nb];
058bc4b5
PB
895 phys_section_destroy(section->mr);
896 }
9affd6fc
PB
897 g_free(map->sections);
898 g_free(map->nodes);
5312bd8b
AK
899}
900
ac1970fb 901static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
902{
903 subpage_t *subpage;
a8170e5e 904 hwaddr base = section->offset_within_address_space
0f0cb164 905 & TARGET_PAGE_MASK;
97115a8d 906 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
53cb28cb 907 d->map.nodes, d->map.sections);
0f0cb164
AK
908 MemoryRegionSection subsection = {
909 .offset_within_address_space = base,
052e87b0 910 .size = int128_make64(TARGET_PAGE_SIZE),
0f0cb164 911 };
a8170e5e 912 hwaddr start, end;
0f0cb164 913
f3705d53 914 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 915
f3705d53 916 if (!(existing->mr->subpage)) {
acc9d80b 917 subpage = subpage_init(d->as, base);
3be91e86 918 subsection.address_space = d->as;
0f0cb164 919 subsection.mr = &subpage->iomem;
ac1970fb 920 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
53cb28cb 921 phys_section_add(&d->map, &subsection));
0f0cb164 922 } else {
f3705d53 923 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
924 }
925 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
052e87b0 926 end = start + int128_get64(section->size) - 1;
53cb28cb
MA
927 subpage_register(subpage, start, end,
928 phys_section_add(&d->map, section));
0f0cb164
AK
929}
930
931
052e87b0
PB
932static void register_multipage(AddressSpaceDispatch *d,
933 MemoryRegionSection *section)
33417e70 934{
a8170e5e 935 hwaddr start_addr = section->offset_within_address_space;
53cb28cb 936 uint16_t section_index = phys_section_add(&d->map, section);
052e87b0
PB
937 uint64_t num_pages = int128_get64(int128_rshift(section->size,
938 TARGET_PAGE_BITS));
dd81124b 939
733d5ef5
PB
940 assert(num_pages);
941 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
33417e70
FB
942}
943
ac1970fb 944static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 945{
89ae337a 946 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
00752703 947 AddressSpaceDispatch *d = as->next_dispatch;
99b9cc06 948 MemoryRegionSection now = *section, remain = *section;
052e87b0 949 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
0f0cb164 950
733d5ef5
PB
951 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
952 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
953 - now.offset_within_address_space;
954
052e87b0 955 now.size = int128_min(int128_make64(left), now.size);
ac1970fb 956 register_subpage(d, &now);
733d5ef5 957 } else {
052e87b0 958 now.size = int128_zero();
733d5ef5 959 }
052e87b0
PB
960 while (int128_ne(remain.size, now.size)) {
961 remain.size = int128_sub(remain.size, now.size);
962 remain.offset_within_address_space += int128_get64(now.size);
963 remain.offset_within_region += int128_get64(now.size);
69b67646 964 now = remain;
052e87b0 965 if (int128_lt(remain.size, page_size)) {
733d5ef5 966 register_subpage(d, &now);
88266249 967 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
052e87b0 968 now.size = page_size;
ac1970fb 969 register_subpage(d, &now);
69b67646 970 } else {
052e87b0 971 now.size = int128_and(now.size, int128_neg(page_size));
ac1970fb 972 register_multipage(d, &now);
69b67646 973 }
0f0cb164
AK
974 }
975}
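/* Worked example (illustrative, assuming 4 KiB target pages): a section
 * covering [0x1800, 0x5800) is split by the loop above into a subpage for
 * [0x1800, 0x2000), a multipage run for [0x2000, 0x5000) and a trailing
 * subpage for [0x5000, 0x5800). */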
976
62a2744c
SY
977void qemu_flush_coalesced_mmio_buffer(void)
978{
979 if (kvm_enabled())
980 kvm_flush_coalesced_mmio_buffer();
981}
982
b2a8658e
UD
983void qemu_mutex_lock_ramlist(void)
984{
985 qemu_mutex_lock(&ram_list.mutex);
986}
987
988void qemu_mutex_unlock_ramlist(void)
989{
990 qemu_mutex_unlock(&ram_list.mutex);
991}
992
e1e84ba0 993#ifdef __linux__
c902760f
MT
994
995#include <sys/vfs.h>
996
997#define HUGETLBFS_MAGIC 0x958458f6
998
999static long gethugepagesize(const char *path)
1000{
1001 struct statfs fs;
1002 int ret;
1003
1004 do {
9742bf26 1005 ret = statfs(path, &fs);
c902760f
MT
1006 } while (ret != 0 && errno == EINTR);
1007
1008 if (ret != 0) {
9742bf26
YT
1009 perror(path);
1010 return 0;
c902760f
MT
1011 }
1012
1013 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 1014 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
1015
1016 return fs.f_bsize;
1017}
1018
04b16653
AW
1019static void *file_ram_alloc(RAMBlock *block,
1020 ram_addr_t memory,
7f56e740
PB
1021 const char *path,
1022 Error **errp)
c902760f
MT
1023{
1024 char *filename;
8ca761f6
PF
1025 char *sanitized_name;
1026 char *c;
c902760f
MT
1027 void *area;
1028 int fd;
c902760f
MT
1029 unsigned long hpagesize;
1030
1031 hpagesize = gethugepagesize(path);
1032 if (!hpagesize) {
f9a49dfa 1033 goto error;
c902760f
MT
1034 }
1035
1036 if (memory < hpagesize) {
1037 return NULL;
1038 }
1039
1040 if (kvm_enabled() && !kvm_has_sync_mmu()) {
7f56e740
PB
1041 error_setg(errp,
1042 "host lacks kvm mmu notifiers, -mem-path unsupported");
f9a49dfa 1043 goto error;
c902760f
MT
1044 }
1045
8ca761f6
PF
1046 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1047 sanitized_name = g_strdup(block->mr->name);
1048 for (c = sanitized_name; *c != '\0'; c++) {
1049 if (*c == '/')
1050 *c = '_';
1051 }
1052
1053 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1054 sanitized_name);
1055 g_free(sanitized_name);
c902760f
MT
1056
1057 fd = mkstemp(filename);
1058 if (fd < 0) {
7f56e740
PB
1059 error_setg_errno(errp, errno,
1060 "unable to create backing store for hugepages");
e4ada482 1061 g_free(filename);
f9a49dfa 1062 goto error;
c902760f
MT
1063 }
1064 unlink(filename);
e4ada482 1065 g_free(filename);
c902760f
MT
1066
1067 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1068
1069 /*
1070 * ftruncate is not supported by hugetlbfs in older
1071 * hosts, so don't bother bailing out on errors.
1072 * If anything goes wrong with it under other filesystems,
1073 * mmap will fail.
1074 */
7f56e740 1075 if (ftruncate(fd, memory)) {
9742bf26 1076 perror("ftruncate");
7f56e740 1077 }
c902760f 1078
dbcb8981
PB
1079 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1080 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1081 fd, 0);
c902760f 1082 if (area == MAP_FAILED) {
7f56e740
PB
1083 error_setg_errno(errp, errno,
1084 "unable to map backing store for hugepages");
9742bf26 1085 close(fd);
f9a49dfa 1086 goto error;
c902760f 1087 }
ef36fa14
MT
1088
1089 if (mem_prealloc) {
38183310 1090 os_mem_prealloc(fd, area, memory);
ef36fa14
MT
1091 }
1092
04b16653 1093 block->fd = fd;
c902760f 1094 return area;
f9a49dfa
MT
1095
1096error:
1097 if (mem_prealloc) {
1098 exit(1);
1099 }
1100 return NULL;
c902760f
MT
1101}
1102#endif
1103
d17b5288 1104static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
1105{
1106 RAMBlock *block, *next_block;
3e837b2c 1107 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653 1108
49cd9ac6
SH
1109 assert(size != 0); /* it would hand out same offset multiple times */
1110
a3161038 1111 if (QTAILQ_EMPTY(&ram_list.blocks))
04b16653
AW
1112 return 0;
1113
a3161038 1114 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 1115 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
1116
1117 end = block->offset + block->length;
1118
a3161038 1119 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
04b16653
AW
1120 if (next_block->offset >= end) {
1121 next = MIN(next, next_block->offset);
1122 }
1123 }
1124 if (next - end >= size && next - end < mingap) {
3e837b2c 1125 offset = end;
04b16653
AW
1126 mingap = next - end;
1127 }
1128 }
3e837b2c
AW
1129
1130 if (offset == RAM_ADDR_MAX) {
1131 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1132 (uint64_t)size);
1133 abort();
1134 }
1135
04b16653
AW
1136 return offset;
1137}
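/* Worked example (illustrative): with existing blocks at [0x0, 0x8000000) and
 * [0x10000000, 0x18000000), a request for 0x4000000 bytes considers both the
 * gap [0x8000000, 0x10000000) and the open space after the last block; the
 * in-between gap is the smallest candidate that fits, so 0x8000000 is
 * returned. */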
1138
652d7ec2 1139ram_addr_t last_ram_offset(void)
d17b5288
AW
1140{
1141 RAMBlock *block;
1142 ram_addr_t last = 0;
1143
a3161038 1144 QTAILQ_FOREACH(block, &ram_list.blocks, next)
d17b5288
AW
1145 last = MAX(last, block->offset + block->length);
1146
1147 return last;
1148}
1149
ddb97f1d
JB
1150static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1151{
1152 int ret;
ddb97f1d
JB
1153
1154 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2ff3de68
MA
1155 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1156 "dump-guest-core", true)) {
ddb97f1d
JB
1157 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1158 if (ret) {
1159 perror("qemu_madvise");
1160 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1161 "but dump_guest_core=off specified\n");
1162 }
1163 }
1164}
1165
20cfe881 1166static RAMBlock *find_ram_block(ram_addr_t addr)
84b89d78 1167{
20cfe881 1168 RAMBlock *block;
84b89d78 1169
a3161038 1170 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1171 if (block->offset == addr) {
20cfe881 1172 return block;
c5705a77
AK
1173 }
1174 }
20cfe881
HT
1175
1176 return NULL;
1177}
1178
1179void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1180{
1181 RAMBlock *new_block = find_ram_block(addr);
1182 RAMBlock *block;
1183
c5705a77
AK
1184 assert(new_block);
1185 assert(!new_block->idstr[0]);
84b89d78 1186
09e5ab63
AL
1187 if (dev) {
1188 char *id = qdev_get_dev_path(dev);
84b89d78
CM
1189 if (id) {
1190 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 1191 g_free(id);
84b89d78
CM
1192 }
1193 }
1194 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1195
b2a8658e
UD
1196 /* This assumes the iothread lock is taken here too. */
1197 qemu_mutex_lock_ramlist();
a3161038 1198 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
c5705a77 1199 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
1200 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1201 new_block->idstr);
1202 abort();
1203 }
1204 }
b2a8658e 1205 qemu_mutex_unlock_ramlist();
c5705a77
AK
1206}
1207
20cfe881
HT
1208void qemu_ram_unset_idstr(ram_addr_t addr)
1209{
1210 RAMBlock *block = find_ram_block(addr);
1211
1212 if (block) {
1213 memset(block->idstr, 0, sizeof(block->idstr));
1214 }
1215}
1216
8490fc78
LC
1217static int memory_try_enable_merging(void *addr, size_t len)
1218{
2ff3de68 1219 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
8490fc78
LC
1220 /* disabled by the user */
1221 return 0;
1222 }
1223
1224 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1225}
1226
e1c57ab8 1227static ram_addr_t ram_block_add(RAMBlock *new_block)
c5705a77 1228{
e1c57ab8 1229 RAMBlock *block;
2152f5ca
JQ
1230 ram_addr_t old_ram_size, new_ram_size;
1231
1232 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
c5705a77 1233
b2a8658e
UD
1234 /* This assumes the iothread lock is taken here too. */
1235 qemu_mutex_lock_ramlist();
e1c57ab8
PB
1236 new_block->offset = find_ram_offset(new_block->length);
1237
1238 if (!new_block->host) {
1239 if (xen_enabled()) {
1240 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1241 } else {
1242 new_block->host = phys_mem_alloc(new_block->length);
39228250
MA
1243 if (!new_block->host) {
1244 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1245 new_block->mr->name, strerror(errno));
1246 exit(1);
1247 }
e1c57ab8 1248 memory_try_enable_merging(new_block->host, new_block->length);
6977dfe6 1249 }
c902760f 1250 }
94a6b54f 1251
abb26d63
PB
1252 /* Keep the list sorted from biggest to smallest block. */
1253 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1254 if (block->length < new_block->length) {
1255 break;
1256 }
1257 }
1258 if (block) {
1259 QTAILQ_INSERT_BEFORE(block, new_block, next);
1260 } else {
1261 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1262 }
0d6d3c87 1263 ram_list.mru_block = NULL;
94a6b54f 1264
f798b07f 1265 ram_list.version++;
b2a8658e 1266 qemu_mutex_unlock_ramlist();
f798b07f 1267
2152f5ca
JQ
1268 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1269
1270 if (new_ram_size > old_ram_size) {
1ab4c8ce
JQ
1271 int i;
1272 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1273 ram_list.dirty_memory[i] =
1274 bitmap_zero_extend(ram_list.dirty_memory[i],
1275 old_ram_size, new_ram_size);
1276 }
2152f5ca 1277 }
e1c57ab8 1278 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
94a6b54f 1279
e1c57ab8
PB
1280 qemu_ram_setup_dump(new_block->host, new_block->length);
1281 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1282 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
ddb97f1d 1283
e1c57ab8
PB
1284 if (kvm_enabled()) {
1285 kvm_setup_guest_memory(new_block->host, new_block->length);
1286 }
6f0437e8 1287
94a6b54f
PB
1288 return new_block->offset;
1289}
e9a1ab19 1290
0b183fc8 1291#ifdef __linux__
e1c57ab8 1292ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
dbcb8981 1293 bool share, const char *mem_path,
7f56e740 1294 Error **errp)
e1c57ab8
PB
1295{
1296 RAMBlock *new_block;
1297
1298 if (xen_enabled()) {
7f56e740
PB
1299 error_setg(errp, "-mem-path not supported with Xen");
1300 return -1;
e1c57ab8
PB
1301 }
1302
1303 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1304 /*
1305 * file_ram_alloc() needs to allocate just like
1306 * phys_mem_alloc, but we haven't bothered to provide
1307 * a hook there.
1308 */
7f56e740
PB
1309 error_setg(errp,
1310 "-mem-path not supported with this accelerator");
1311 return -1;
e1c57ab8
PB
1312 }
1313
1314 size = TARGET_PAGE_ALIGN(size);
1315 new_block = g_malloc0(sizeof(*new_block));
1316 new_block->mr = mr;
1317 new_block->length = size;
dbcb8981 1318 new_block->flags = share ? RAM_SHARED : 0;
7f56e740
PB
1319 new_block->host = file_ram_alloc(new_block, size,
1320 mem_path, errp);
1321 if (!new_block->host) {
1322 g_free(new_block);
1323 return -1;
1324 }
1325
e1c57ab8
PB
1326 return ram_block_add(new_block);
1327}
0b183fc8 1328#endif
e1c57ab8
PB
1329
1330ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1331 MemoryRegion *mr)
1332{
1333 RAMBlock *new_block;
1334
1335 size = TARGET_PAGE_ALIGN(size);
1336 new_block = g_malloc0(sizeof(*new_block));
1337 new_block->mr = mr;
1338 new_block->length = size;
1339 new_block->fd = -1;
1340 new_block->host = host;
1341 if (host) {
7bd4f430 1342 new_block->flags |= RAM_PREALLOC;
e1c57ab8
PB
1343 }
1344 return ram_block_add(new_block);
1345}
1346
c5705a77 1347ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 1348{
c5705a77 1349 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
1350}
1351
1f2e98b6
AW
1352void qemu_ram_free_from_ptr(ram_addr_t addr)
1353{
1354 RAMBlock *block;
1355
b2a8658e
UD
1356 /* This assumes the iothread lock is taken here too. */
1357 qemu_mutex_lock_ramlist();
a3161038 1358 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1f2e98b6 1359 if (addr == block->offset) {
a3161038 1360 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1361 ram_list.mru_block = NULL;
f798b07f 1362 ram_list.version++;
7267c094 1363 g_free(block);
b2a8658e 1364 break;
1f2e98b6
AW
1365 }
1366 }
b2a8658e 1367 qemu_mutex_unlock_ramlist();
1f2e98b6
AW
1368}
1369
c227f099 1370void qemu_ram_free(ram_addr_t addr)
e9a1ab19 1371{
04b16653
AW
1372 RAMBlock *block;
1373
b2a8658e
UD
1374 /* This assumes the iothread lock is taken here too. */
1375 qemu_mutex_lock_ramlist();
a3161038 1376 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
04b16653 1377 if (addr == block->offset) {
a3161038 1378 QTAILQ_REMOVE(&ram_list.blocks, block, next);
0d6d3c87 1379 ram_list.mru_block = NULL;
f798b07f 1380 ram_list.version++;
7bd4f430 1381 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1382 ;
dfeaf2ab
MA
1383 } else if (xen_enabled()) {
1384 xen_invalidate_map_cache_entry(block->host);
089f3f76 1385#ifndef _WIN32
3435f395
MA
1386 } else if (block->fd >= 0) {
1387 munmap(block->host, block->length);
1388 close(block->fd);
089f3f76 1389#endif
04b16653 1390 } else {
dfeaf2ab 1391 qemu_anon_ram_free(block->host, block->length);
04b16653 1392 }
7267c094 1393 g_free(block);
b2a8658e 1394 break;
04b16653
AW
1395 }
1396 }
b2a8658e 1397 qemu_mutex_unlock_ramlist();
04b16653 1398
e9a1ab19
FB
1399}
1400
cd19cfa2
HY
1401#ifndef _WIN32
1402void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1403{
1404 RAMBlock *block;
1405 ram_addr_t offset;
1406 int flags;
1407 void *area, *vaddr;
1408
a3161038 1409 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
cd19cfa2
HY
1410 offset = addr - block->offset;
1411 if (offset < block->length) {
1412 vaddr = block->host + offset;
7bd4f430 1413 if (block->flags & RAM_PREALLOC) {
cd19cfa2 1414 ;
dfeaf2ab
MA
1415 } else if (xen_enabled()) {
1416 abort();
cd19cfa2
HY
1417 } else {
1418 flags = MAP_FIXED;
1419 munmap(vaddr, length);
3435f395 1420 if (block->fd >= 0) {
dbcb8981
PB
1421 flags |= (block->flags & RAM_SHARED ?
1422 MAP_SHARED : MAP_PRIVATE);
3435f395
MA
1423 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1424 flags, block->fd, offset);
cd19cfa2 1425 } else {
2eb9fbaa
MA
1426 /*
1427 * Remap needs to match alloc. Accelerators that
1428 * set phys_mem_alloc never remap. If they did,
1429 * we'd need a remap hook here.
1430 */
1431 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1432
cd19cfa2
HY
1433 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1434 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1435 flags, -1, 0);
cd19cfa2
HY
1436 }
1437 if (area != vaddr) {
f15fbc4b
AP
1438 fprintf(stderr, "Could not remap addr: "
1439 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
1440 length, addr);
1441 exit(1);
1442 }
8490fc78 1443 memory_try_enable_merging(vaddr, length);
ddb97f1d 1444 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
1445 }
1446 return;
1447 }
1448 }
1449}
1450#endif /* !_WIN32 */
1451
a35ba7be
PB
1452int qemu_get_ram_fd(ram_addr_t addr)
1453{
1454 RAMBlock *block = qemu_get_ram_block(addr);
1455
1456 return block->fd;
1457}
1458
1b5ec234
PB
1459/* Return a host pointer to ram allocated with qemu_ram_alloc.
1460 With the exception of the softmmu code in this file, this should
1461 only be used for local memory (e.g. video ram) that the device owns,
1462 and knows it isn't going to access beyond the end of the block.
1463
1464 It should not be used for general purpose DMA.
1465 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1466 */
1467void *qemu_get_ram_ptr(ram_addr_t addr)
1468{
1469 RAMBlock *block = qemu_get_ram_block(addr);
1470
0d6d3c87
PB
1471 if (xen_enabled()) {
1472 /* We need to check if the requested address is in the RAM
1473 * because we don't want to map the entire memory in QEMU.
1474 * In that case just map until the end of the page.
1475 */
1476 if (block->offset == 0) {
1477 return xen_map_cache(addr, 0, 0);
1478 } else if (block->host == NULL) {
1479 block->host =
1480 xen_map_cache(block->offset, block->length, 1);
1481 }
1482 }
1483 return block->host + (addr - block->offset);
dc828ca1
PB
1484}
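/* Hypothetical usage sketch for the rule above: a device that owns a RAM
 * block (e.g. video RAM) may cache a host pointer to it, while general DMA
 * should go through cpu_physical_memory_rw()/map() instead. */
static inline uint8_t *sketch_vram_host_ptr(ram_addr_t vram_offset)
{
    return qemu_get_ram_ptr(vram_offset);
}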
1485
38bee5dc
SS
1486/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1487 * but takes a size argument */
cb85f7ab 1488static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
38bee5dc 1489{
8ab934f9
SS
1490 if (*size == 0) {
1491 return NULL;
1492 }
868bb33f 1493 if (xen_enabled()) {
e41d7c69 1494 return xen_map_cache(addr, *size, 1);
868bb33f 1495 } else {
38bee5dc
SS
1496 RAMBlock *block;
1497
a3161038 1498 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
38bee5dc
SS
1499 if (addr - block->offset < block->length) {
1500 if (addr - block->offset + *size > block->length)
1501 *size = block->length - addr + block->offset;
1502 return block->host + (addr - block->offset);
1503 }
1504 }
1505
1506 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1507 abort();
38bee5dc
SS
1508 }
1509}
1510
7443b437
PB
1511/* Some of the softmmu routines need to translate from a host pointer
1512 (typically a TLB entry) back to a ram offset. */
1b5ec234 1513MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 1514{
94a6b54f
PB
1515 RAMBlock *block;
1516 uint8_t *host = ptr;
1517
868bb33f 1518 if (xen_enabled()) {
e41d7c69 1519 *ram_addr = xen_ram_addr_from_mapcache(ptr);
1b5ec234 1520 return qemu_get_ram_block(*ram_addr)->mr;
712c2b41
SS
1521 }
1522
23887b79
PB
1523 block = ram_list.mru_block;
1524 if (block && block->host && host - block->host < block->length) {
1525 goto found;
1526 }
1527
a3161038 1528 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
1529 /* This case happens when the block is not mapped. */
1530 if (block->host == NULL) {
1531 continue;
1532 }
f471a17e 1533 if (host - block->host < block->length) {
23887b79 1534 goto found;
f471a17e 1535 }
94a6b54f 1536 }
432d268c 1537
1b5ec234 1538 return NULL;
23887b79
PB
1539
1540found:
1541 *ram_addr = block->offset + (host - block->host);
1b5ec234 1542 return block->mr;
e890261f 1543}
f471a17e 1544
a8170e5e 1545static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 1546 uint64_t val, unsigned size)
9fa3e853 1547{
52159192 1548 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
0e0df1e2 1549 tb_invalidate_phys_page_fast(ram_addr, size);
3a7d929e 1550 }
0e0df1e2
AK
1551 switch (size) {
1552 case 1:
1553 stb_p(qemu_get_ram_ptr(ram_addr), val);
1554 break;
1555 case 2:
1556 stw_p(qemu_get_ram_ptr(ram_addr), val);
1557 break;
1558 case 4:
1559 stl_p(qemu_get_ram_ptr(ram_addr), val);
1560 break;
1561 default:
1562 abort();
3a7d929e 1563 }
52159192
JQ
1564 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1565 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
f23db169
FB
1566 /* we remove the notdirty callback only if the code has been
1567 flushed */
a2cd8c85 1568 if (!cpu_physical_memory_is_clean(ram_addr)) {
4917cf44 1569 CPUArchState *env = current_cpu->env_ptr;
93afeade 1570 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
4917cf44 1571 }
9fa3e853
FB
1572}
1573
b018ddf6
PB
1574static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1575 unsigned size, bool is_write)
1576{
1577 return is_write;
1578}
1579
0e0df1e2 1580static const MemoryRegionOps notdirty_mem_ops = {
0e0df1e2 1581 .write = notdirty_mem_write,
b018ddf6 1582 .valid.accepts = notdirty_mem_accepts,
0e0df1e2 1583 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
1584};
1585
0f459d16 1586/* Generate a debug exception if a watchpoint has been hit. */
b4051334 1587static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 1588{
93afeade
AF
1589 CPUState *cpu = current_cpu;
1590 CPUArchState *env = cpu->env_ptr;
06d55cc1 1591 target_ulong pc, cs_base;
0f459d16 1592 target_ulong vaddr;
a1d1bb31 1593 CPUWatchpoint *wp;
06d55cc1 1594 int cpu_flags;
0f459d16 1595
ff4700b0 1596 if (cpu->watchpoint_hit) {
06d55cc1
AL
1597 /* We re-entered the check after replacing the TB. Now raise
1598 * the debug interrupt so that it will trigger after the
1599 * current instruction. */
93afeade 1600 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
06d55cc1
AL
1601 return;
1602 }
93afeade 1603 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
ff4700b0 1604 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
b4051334
AL
1605 if ((vaddr == (wp->vaddr & len_mask) ||
1606 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28 1607 wp->flags |= BP_WATCHPOINT_HIT;
ff4700b0
AF
1608 if (!cpu->watchpoint_hit) {
1609 cpu->watchpoint_hit = wp;
239c51a5 1610 tb_check_watchpoint(cpu);
6e140f28 1611 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
27103424 1612 cpu->exception_index = EXCP_DEBUG;
5638d180 1613 cpu_loop_exit(cpu);
6e140f28
AL
1614 } else {
1615 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
648f034c 1616 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
0ea8cb88 1617 cpu_resume_from_signal(cpu, NULL);
6e140f28 1618 }
06d55cc1 1619 }
6e140f28
AL
1620 } else {
1621 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
1622 }
1623 }
1624}
1625
6658ffb8
PB
1626/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1627 so these check for a hit then pass through to the normal out-of-line
1628 phys routines. */
a8170e5e 1629static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 1630 unsigned size)
6658ffb8 1631{
1ec9b909
AK
1632 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1633 switch (size) {
2c17449b 1634 case 1: return ldub_phys(&address_space_memory, addr);
41701aa4 1635 case 2: return lduw_phys(&address_space_memory, addr);
fdfba1a2 1636 case 4: return ldl_phys(&address_space_memory, addr);
1ec9b909
AK
1637 default: abort();
1638 }
6658ffb8
PB
1639}
1640
a8170e5e 1641static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 1642 uint64_t val, unsigned size)
6658ffb8 1643{
1ec9b909
AK
1644 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1645 switch (size) {
67364150 1646 case 1:
db3be60d 1647 stb_phys(&address_space_memory, addr, val);
67364150
MF
1648 break;
1649 case 2:
5ce5944d 1650 stw_phys(&address_space_memory, addr, val);
67364150
MF
1651 break;
1652 case 4:
ab1da857 1653 stl_phys(&address_space_memory, addr, val);
67364150 1654 break;
1ec9b909
AK
1655 default: abort();
1656 }
6658ffb8
PB
1657}
1658
1ec9b909
AK
1659static const MemoryRegionOps watch_mem_ops = {
1660 .read = watch_mem_read,
1661 .write = watch_mem_write,
1662 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 1663};
6658ffb8 1664
a8170e5e 1665static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 1666 unsigned len)
db7b5426 1667{
acc9d80b
JK
1668 subpage_t *subpage = opaque;
1669 uint8_t buf[4];
791af8c8 1670
db7b5426 1671#if defined(DEBUG_SUBPAGE)
016e9d62 1672 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
acc9d80b 1673 subpage, len, addr);
db7b5426 1674#endif
acc9d80b
JK
1675 address_space_read(subpage->as, addr + subpage->base, buf, len);
1676 switch (len) {
1677 case 1:
1678 return ldub_p(buf);
1679 case 2:
1680 return lduw_p(buf);
1681 case 4:
1682 return ldl_p(buf);
1683 default:
1684 abort();
1685 }
db7b5426
BS
1686}
1687
a8170e5e 1688static void subpage_write(void *opaque, hwaddr addr,
70c68e44 1689 uint64_t value, unsigned len)
db7b5426 1690{
acc9d80b
JK
1691 subpage_t *subpage = opaque;
1692 uint8_t buf[4];
1693
db7b5426 1694#if defined(DEBUG_SUBPAGE)
016e9d62 1695 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
acc9d80b
JK
1696 " value %"PRIx64"\n",
1697 __func__, subpage, len, addr, value);
db7b5426 1698#endif
acc9d80b
JK
1699 switch (len) {
1700 case 1:
1701 stb_p(buf, value);
1702 break;
1703 case 2:
1704 stw_p(buf, value);
1705 break;
1706 case 4:
1707 stl_p(buf, value);
1708 break;
1709 default:
1710 abort();
1711 }
1712 address_space_write(subpage->as, addr + subpage->base, buf, len);
db7b5426
BS
1713}
1714
c353e4cc 1715static bool subpage_accepts(void *opaque, hwaddr addr,
016e9d62 1716 unsigned len, bool is_write)
c353e4cc 1717{
acc9d80b 1718 subpage_t *subpage = opaque;
c353e4cc 1719#if defined(DEBUG_SUBPAGE)
016e9d62 1720 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
acc9d80b 1721 __func__, subpage, is_write ? 'w' : 'r', len, addr);
c353e4cc
PB
1722#endif
1723
acc9d80b 1724 return address_space_access_valid(subpage->as, addr + subpage->base,
016e9d62 1725 len, is_write);
c353e4cc
PB
1726}
1727
70c68e44
AK
1728static const MemoryRegionOps subpage_ops = {
1729 .read = subpage_read,
1730 .write = subpage_write,
c353e4cc 1731 .valid.accepts = subpage_accepts,
70c68e44 1732 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
1733};
1734
c227f099 1735static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 1736 uint16_t section)
db7b5426
BS
1737{
1738 int idx, eidx;
1739
1740 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1741 return -1;
1742 idx = SUBPAGE_IDX(start);
1743 eidx = SUBPAGE_IDX(end);
1744#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1745 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1746 __func__, mmio, start, end, idx, eidx, section);
db7b5426 1747#endif
db7b5426 1748 for (; idx <= eidx; idx++) {
5312bd8b 1749 mmio->sub_section[idx] = section;
db7b5426
BS
1750 }
1751
1752 return 0;
1753}
1754
acc9d80b 1755static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
db7b5426 1756{
c227f099 1757 subpage_t *mmio;
db7b5426 1758
7267c094 1759 mmio = g_malloc0(sizeof(subpage_t));
1eec614b 1760
acc9d80b 1761 mmio->as = as;
1eec614b 1762 mmio->base = base;
2c9b15ca 1763 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
70c68e44 1764 "subpage", TARGET_PAGE_SIZE);
b3b00c78 1765 mmio->iomem.subpage = true;
db7b5426 1766#if defined(DEBUG_SUBPAGE)
016e9d62
AK
1767 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1768 mmio, base, TARGET_PAGE_SIZE);
db7b5426 1769#endif
b41aac4f 1770 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
db7b5426
BS
1771
1772 return mmio;
1773}
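
/*
 * Illustrative sketch (hypothetical usage, not part of the original file):
 * how the two helpers above can be combined to split a single guest page
 * between two MemoryRegionSections.  The section indices 5 and 6 stand in
 * for values that would really come from phys_section_add().
 */
static void example_split_page(AddressSpace *as, hwaddr page_base)
{
    subpage_t *sp = subpage_init(as, page_base);

    /* first half of the page dispatches to section 5 ... */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, 5);
    /* ... and the second half to section 6 */
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, 6);
}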
1774
a656e22f
PC
1775static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1776 MemoryRegion *mr)
5312bd8b 1777{
a656e22f 1778 assert(as);
5312bd8b 1779 MemoryRegionSection section = {
a656e22f 1780 .address_space = as,
5312bd8b
AK
1781 .mr = mr,
1782 .offset_within_address_space = 0,
1783 .offset_within_region = 0,
052e87b0 1784 .size = int128_2_64(),
5312bd8b
AK
1785 };
1786
53cb28cb 1787 return phys_section_add(map, &section);
5312bd8b
AK
1788}
1789
77717094 1790MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
aa102231 1791{
77717094 1792 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
1793}
1794
e9179ce1
AK
1795static void io_mem_init(void)
1796{
2c9b15ca
PB
1797 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1798 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
0e0df1e2 1799 "unassigned", UINT64_MAX);
2c9b15ca 1800 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
0e0df1e2 1801 "notdirty", UINT64_MAX);
2c9b15ca 1802 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
1ec9b909 1803 "watch", UINT64_MAX);
e9179ce1
AK
1804}
1805
ac1970fb 1806static void mem_begin(MemoryListener *listener)
00752703
PB
1807{
1808 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
53cb28cb
MA
1809 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1810 uint16_t n;
1811
a656e22f 1812 n = dummy_section(&d->map, as, &io_mem_unassigned);
53cb28cb 1813 assert(n == PHYS_SECTION_UNASSIGNED);
a656e22f 1814 n = dummy_section(&d->map, as, &io_mem_notdirty);
53cb28cb 1815 assert(n == PHYS_SECTION_NOTDIRTY);
a656e22f 1816 n = dummy_section(&d->map, as, &io_mem_rom);
53cb28cb 1817 assert(n == PHYS_SECTION_ROM);
a656e22f 1818 n = dummy_section(&d->map, as, &io_mem_watch);
53cb28cb 1819 assert(n == PHYS_SECTION_WATCH);
00752703 1820
9736e55b 1821 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
00752703
PB
1822 d->as = as;
1823 as->next_dispatch = d;
1824}
1825
1826static void mem_commit(MemoryListener *listener)
ac1970fb 1827{
89ae337a 1828 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
0475d94f
PB
1829 AddressSpaceDispatch *cur = as->dispatch;
1830 AddressSpaceDispatch *next = as->next_dispatch;
1831
53cb28cb 1832 phys_page_compact_all(next, next->map.nodes_nb);
b35ba30f 1833
0475d94f 1834 as->dispatch = next;
b41aac4f 1835
53cb28cb
MA
1836 if (cur) {
1837 phys_sections_free(&cur->map);
1838 g_free(cur);
1839 }
9affd6fc
PB
1840}
1841
1d71148e 1842static void tcg_commit(MemoryListener *listener)
50c1e149 1843{
182735ef 1844 CPUState *cpu;
117712c3
AK
1845
1846 /* since each CPU stores ram addresses in its TLB cache, we must
1847 reset the modified entries */
1848 /* XXX: slow ! */
bdc44640 1849 CPU_FOREACH(cpu) {
33bde2e1
EI
 1850        /* FIXME: Disentangle the circular cpu.h header dependencies so
 1851           that we can get the right CPU directly from the listener. */
1852 if (cpu->tcg_as_listener != listener) {
1853 continue;
1854 }
00c8cb0a 1855 tlb_flush(cpu, 1);
117712c3 1856 }
50c1e149
AK
1857}
1858
93632747
AK
1859static void core_log_global_start(MemoryListener *listener)
1860{
981fdf23 1861 cpu_physical_memory_set_dirty_tracking(true);
93632747
AK
1862}
1863
1864static void core_log_global_stop(MemoryListener *listener)
1865{
981fdf23 1866 cpu_physical_memory_set_dirty_tracking(false);
93632747
AK
1867}
1868
93632747 1869static MemoryListener core_memory_listener = {
93632747
AK
1870 .log_global_start = core_log_global_start,
1871 .log_global_stop = core_log_global_stop,
ac1970fb 1872 .priority = 1,
93632747
AK
1873};
1874
ac1970fb
AK
1875void address_space_init_dispatch(AddressSpace *as)
1876{
00752703 1877 as->dispatch = NULL;
89ae337a 1878 as->dispatch_listener = (MemoryListener) {
ac1970fb 1879 .begin = mem_begin,
00752703 1880 .commit = mem_commit,
ac1970fb
AK
1881 .region_add = mem_add,
1882 .region_nop = mem_add,
1883 .priority = 0,
1884 };
89ae337a 1885 memory_listener_register(&as->dispatch_listener, as);
ac1970fb
AK
1886}
1887
83f3c251
AK
1888void address_space_destroy_dispatch(AddressSpace *as)
1889{
1890 AddressSpaceDispatch *d = as->dispatch;
1891
89ae337a 1892 memory_listener_unregister(&as->dispatch_listener);
83f3c251
AK
1893 g_free(d);
1894 as->dispatch = NULL;
1895}
1896
62152b8a
AK
1897static void memory_map_init(void)
1898{
7267c094 1899 system_memory = g_malloc(sizeof(*system_memory));
03f49957 1900
57271d63 1901 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
7dca8043 1902 address_space_init(&address_space_memory, system_memory, "memory");
309cb471 1903
7267c094 1904 system_io = g_malloc(sizeof(*system_io));
3bb28b72
JK
1905 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1906 65536);
7dca8043 1907 address_space_init(&address_space_io, system_io, "I/O");
93632747 1908
f6790af6 1909 memory_listener_register(&core_memory_listener, &address_space_memory);
62152b8a
AK
1910}
1911
1912MemoryRegion *get_system_memory(void)
1913{
1914 return system_memory;
1915}
1916
309cb471
AK
1917MemoryRegion *get_system_io(void)
1918{
1919 return system_io;
1920}
1921
e2eef170
PB
1922#endif /* !defined(CONFIG_USER_ONLY) */
1923
13eb76e0
FB
1924/* physical memory access (slow version, mainly for debug) */
1925#if defined(CONFIG_USER_ONLY)
f17ec444 1926int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
a68fe89c 1927 uint8_t *buf, int len, int is_write)
13eb76e0
FB
1928{
1929 int l, flags;
1930 target_ulong page;
53a5960a 1931 void * p;
13eb76e0
FB
1932
1933 while (len > 0) {
1934 page = addr & TARGET_PAGE_MASK;
1935 l = (page + TARGET_PAGE_SIZE) - addr;
1936 if (l > len)
1937 l = len;
1938 flags = page_get_flags(page);
1939 if (!(flags & PAGE_VALID))
a68fe89c 1940 return -1;
13eb76e0
FB
1941 if (is_write) {
1942 if (!(flags & PAGE_WRITE))
a68fe89c 1943 return -1;
579a97f7 1944 /* XXX: this code should not depend on lock_user */
72fb7daa 1945 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 1946 return -1;
72fb7daa
AJ
1947 memcpy(p, buf, l);
1948 unlock_user(p, addr, l);
13eb76e0
FB
1949 } else {
1950 if (!(flags & PAGE_READ))
a68fe89c 1951 return -1;
579a97f7 1952 /* XXX: this code should not depend on lock_user */
72fb7daa 1953 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 1954 return -1;
72fb7daa 1955 memcpy(buf, p, l);
5b257578 1956 unlock_user(p, addr, 0);
13eb76e0
FB
1957 }
1958 len -= l;
1959 buf += l;
1960 addr += l;
1961 }
a68fe89c 1962 return 0;
13eb76e0 1963}
8df1cd07 1964
13eb76e0 1965#else
51d7a9eb 1966
a8170e5e
AK
1967static void invalidate_and_set_dirty(hwaddr addr,
1968 hwaddr length)
51d7a9eb 1969{
a2cd8c85 1970 if (cpu_physical_memory_is_clean(addr)) {
51d7a9eb
AP
1971 /* invalidate code */
1972 tb_invalidate_phys_page_range(addr, addr + length, 0);
1973 /* set dirty bit */
52159192
JQ
1974 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1975 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
51d7a9eb 1976 }
e226939d 1977 xen_modified_memory(addr, length);
51d7a9eb
AP
1978}
1979
23326164 1980static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
82f2563f 1981{
e1622f4b 1982 unsigned access_size_max = mr->ops->valid.max_access_size;
23326164
RH
1983
1984 /* Regions are assumed to support 1-4 byte accesses unless
1985 otherwise specified. */
23326164
RH
1986 if (access_size_max == 0) {
1987 access_size_max = 4;
1988 }
1989
1990 /* Bound the maximum access by the alignment of the address. */
1991 if (!mr->ops->impl.unaligned) {
1992 unsigned align_size_max = addr & -addr;
1993 if (align_size_max != 0 && align_size_max < access_size_max) {
1994 access_size_max = align_size_max;
1995 }
82f2563f 1996 }
23326164
RH
1997
1998 /* Don't attempt accesses larger than the maximum. */
1999 if (l > access_size_max) {
2000 l = access_size_max;
82f2563f 2001 }
098178f2
PB
2002 if (l & (l - 1)) {
2003 l = 1 << (qemu_fls(l) - 1);
2004 }
23326164
RH
2005
2006 return l;
82f2563f
PB
2007}
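
/*
 * Worked example for memory_access_size(): an 8-byte access at address
 * 0x1006 to a region with no declared .valid.max_access_size (so the
 * default of 4 applies) and no .impl.unaligned support is first capped to
 * 4 bytes, then further capped to 2 bytes because 0x1006 is only 2-byte
 * aligned; address_space_rw() below then loops to issue the remaining
 * accesses piecewise.
 */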
2008
fd8aaa76 2009bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 2010 int len, bool is_write)
13eb76e0 2011{
149f54b5 2012 hwaddr l;
13eb76e0 2013 uint8_t *ptr;
791af8c8 2014 uint64_t val;
149f54b5 2015 hwaddr addr1;
5c8a00ce 2016 MemoryRegion *mr;
fd8aaa76 2017 bool error = false;
3b46e624 2018
13eb76e0 2019 while (len > 0) {
149f54b5 2020 l = len;
5c8a00ce 2021 mr = address_space_translate(as, addr, &addr1, &l, is_write);
3b46e624 2022
13eb76e0 2023 if (is_write) {
5c8a00ce
PB
2024 if (!memory_access_is_direct(mr, is_write)) {
2025 l = memory_access_size(mr, l, addr1);
4917cf44 2026 /* XXX: could force current_cpu to NULL to avoid
6a00d601 2027 potential bugs */
23326164
RH
2028 switch (l) {
2029 case 8:
2030 /* 64 bit write access */
2031 val = ldq_p(buf);
2032 error |= io_mem_write(mr, addr1, val, 8);
2033 break;
2034 case 4:
1c213d19 2035 /* 32 bit write access */
c27004ec 2036 val = ldl_p(buf);
5c8a00ce 2037 error |= io_mem_write(mr, addr1, val, 4);
23326164
RH
2038 break;
2039 case 2:
1c213d19 2040 /* 16 bit write access */
c27004ec 2041 val = lduw_p(buf);
5c8a00ce 2042 error |= io_mem_write(mr, addr1, val, 2);
23326164
RH
2043 break;
2044 case 1:
1c213d19 2045 /* 8 bit write access */
c27004ec 2046 val = ldub_p(buf);
5c8a00ce 2047 error |= io_mem_write(mr, addr1, val, 1);
23326164
RH
2048 break;
2049 default:
2050 abort();
13eb76e0 2051 }
2bbfa05d 2052 } else {
5c8a00ce 2053 addr1 += memory_region_get_ram_addr(mr);
13eb76e0 2054 /* RAM case */
5579c7f3 2055 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 2056 memcpy(ptr, buf, l);
51d7a9eb 2057 invalidate_and_set_dirty(addr1, l);
13eb76e0
FB
2058 }
2059 } else {
5c8a00ce 2060 if (!memory_access_is_direct(mr, is_write)) {
13eb76e0 2061 /* I/O case */
5c8a00ce 2062 l = memory_access_size(mr, l, addr1);
23326164
RH
2063 switch (l) {
2064 case 8:
2065 /* 64 bit read access */
2066 error |= io_mem_read(mr, addr1, &val, 8);
2067 stq_p(buf, val);
2068 break;
2069 case 4:
13eb76e0 2070 /* 32 bit read access */
5c8a00ce 2071 error |= io_mem_read(mr, addr1, &val, 4);
c27004ec 2072 stl_p(buf, val);
23326164
RH
2073 break;
2074 case 2:
13eb76e0 2075 /* 16 bit read access */
5c8a00ce 2076 error |= io_mem_read(mr, addr1, &val, 2);
c27004ec 2077 stw_p(buf, val);
23326164
RH
2078 break;
2079 case 1:
1c213d19 2080 /* 8 bit read access */
5c8a00ce 2081 error |= io_mem_read(mr, addr1, &val, 1);
c27004ec 2082 stb_p(buf, val);
23326164
RH
2083 break;
2084 default:
2085 abort();
13eb76e0
FB
2086 }
2087 } else {
2088 /* RAM case */
5c8a00ce 2089 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
f3705d53 2090 memcpy(buf, ptr, l);
13eb76e0
FB
2091 }
2092 }
2093 len -= l;
2094 buf += l;
2095 addr += l;
2096 }
fd8aaa76
PB
2097
2098 return error;
13eb76e0 2099}
8df1cd07 2100
fd8aaa76 2101bool address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
2102 const uint8_t *buf, int len)
2103{
fd8aaa76 2104 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
ac1970fb
AK
2105}
2106
fd8aaa76 2107bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb 2108{
fd8aaa76 2109 return address_space_rw(as, addr, buf, len, false);
ac1970fb
AK
2110}
2111
2112
a8170e5e 2113void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
2114 int len, int is_write)
2115{
fd8aaa76 2116 address_space_rw(&address_space_memory, addr, buf, len, is_write);
ac1970fb
AK
2117}
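
/*
 * Illustrative sketch (hypothetical usage): copying a few bytes out of
 * guest-physical memory with the slow-path accessor defined above.  The
 * address 0x1000 and the helper name are made up for the example.
 */
static void example_read_guest_bytes(void)
{
    uint8_t buf[16];

    /* last argument 0 = read, non-zero = write */
    cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);
}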
2118
582b55a9
AG
2119enum write_rom_type {
2120 WRITE_DATA,
2121 FLUSH_CACHE,
2122};
2123
2a221651 2124static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
582b55a9 2125 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
d0ecd2aa 2126{
149f54b5 2127 hwaddr l;
d0ecd2aa 2128 uint8_t *ptr;
149f54b5 2129 hwaddr addr1;
5c8a00ce 2130 MemoryRegion *mr;
3b46e624 2131
d0ecd2aa 2132 while (len > 0) {
149f54b5 2133 l = len;
2a221651 2134 mr = address_space_translate(as, addr, &addr1, &l, true);
3b46e624 2135
5c8a00ce
PB
2136 if (!(memory_region_is_ram(mr) ||
2137 memory_region_is_romd(mr))) {
d0ecd2aa
FB
2138 /* do nothing */
2139 } else {
5c8a00ce 2140 addr1 += memory_region_get_ram_addr(mr);
d0ecd2aa 2141 /* ROM/RAM case */
5579c7f3 2142 ptr = qemu_get_ram_ptr(addr1);
582b55a9
AG
2143 switch (type) {
2144 case WRITE_DATA:
2145 memcpy(ptr, buf, l);
2146 invalidate_and_set_dirty(addr1, l);
2147 break;
2148 case FLUSH_CACHE:
2149 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2150 break;
2151 }
d0ecd2aa
FB
2152 }
2153 len -= l;
2154 buf += l;
2155 addr += l;
2156 }
2157}
2158
582b55a9 2159/* used for ROM loading: can write in RAM and ROM */
2a221651 2160void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
582b55a9
AG
2161 const uint8_t *buf, int len)
2162{
2a221651 2163 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
582b55a9
AG
2164}
2165
2166void cpu_flush_icache_range(hwaddr start, int len)
2167{
2168 /*
2169 * This function should do the same thing as an icache flush that was
2170 * triggered from within the guest. For TCG we are always cache coherent,
2171 * so there is no need to flush anything. For KVM / Xen we need to flush
2172 * the host's instruction cache at least.
2173 */
2174 if (tcg_enabled()) {
2175 return;
2176 }
2177
2a221651
EI
2178 cpu_physical_memory_write_rom_internal(&address_space_memory,
2179 start, NULL, len, FLUSH_CACHE);
582b55a9
AG
2180}
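
/*
 * Illustrative sketch (hypothetical usage): a firmware loader writing a blob
 * into guest memory through the ROM path and then flushing the host
 * instruction cache, which is a no-op under TCG but needed for KVM/Xen as
 * the comment above explains.  'dest', 'blob' and 'size' are made up.
 */
static void example_load_firmware(hwaddr dest, const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, dest, blob, size);
    cpu_flush_icache_range(dest, size);
}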
2181
6d16c2f8 2182typedef struct {
d3e71559 2183 MemoryRegion *mr;
6d16c2f8 2184 void *buffer;
a8170e5e
AK
2185 hwaddr addr;
2186 hwaddr len;
6d16c2f8
AL
2187} BounceBuffer;
2188
2189static BounceBuffer bounce;
2190
ba223c29
AL
2191typedef struct MapClient {
2192 void *opaque;
2193 void (*callback)(void *opaque);
72cf2d4f 2194 QLIST_ENTRY(MapClient) link;
ba223c29
AL
2195} MapClient;
2196
72cf2d4f
BS
2197static QLIST_HEAD(map_client_list, MapClient) map_client_list
2198 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
2199
2200void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2201{
7267c094 2202 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
2203
2204 client->opaque = opaque;
2205 client->callback = callback;
72cf2d4f 2206 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
2207 return client;
2208}
2209
8b9c99d9 2210static void cpu_unregister_map_client(void *_client)
ba223c29
AL
2211{
2212 MapClient *client = (MapClient *)_client;
2213
72cf2d4f 2214 QLIST_REMOVE(client, link);
7267c094 2215 g_free(client);
ba223c29
AL
2216}
2217
2218static void cpu_notify_map_clients(void)
2219{
2220 MapClient *client;
2221
72cf2d4f
BS
2222 while (!QLIST_EMPTY(&map_client_list)) {
2223 client = QLIST_FIRST(&map_client_list);
ba223c29 2224 client->callback(client->opaque);
34d5e948 2225 cpu_unregister_map_client(client);
ba223c29
AL
2226 }
2227}
2228
51644ab7
PB
2229bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2230{
5c8a00ce 2231 MemoryRegion *mr;
51644ab7
PB
2232 hwaddr l, xlat;
2233
2234 while (len > 0) {
2235 l = len;
5c8a00ce
PB
2236 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2237 if (!memory_access_is_direct(mr, is_write)) {
2238 l = memory_access_size(mr, l, addr);
2239 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
51644ab7
PB
2240 return false;
2241 }
2242 }
2243
2244 len -= l;
2245 addr += l;
2246 }
2247 return true;
2248}
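
/*
 * Illustrative sketch (hypothetical usage): probing with
 * address_space_access_valid() before committing to a write, so that a
 * device model can refuse a DMA cleanly instead of partially completing it.
 * address_space_write() returns the error flag from address_space_rw(),
 * hence the inversion.
 */
static bool example_checked_write(AddressSpace *as, hwaddr addr,
                                  const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, addr, len, true)) {
        return false;
    }
    return !address_space_write(as, addr, buf, len);
}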
2249
6d16c2f8
AL
2250/* Map a physical memory region into a host virtual address.
2251 * May map a subset of the requested range, given by and returned in *plen.
2252 * May return NULL if resources needed to perform the mapping are exhausted.
2253 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
2254 * Use cpu_register_map_client() to know when retrying the map operation is
2255 * likely to succeed.
6d16c2f8 2256 */
ac1970fb 2257void *address_space_map(AddressSpace *as,
a8170e5e
AK
2258 hwaddr addr,
2259 hwaddr *plen,
ac1970fb 2260 bool is_write)
6d16c2f8 2261{
a8170e5e 2262 hwaddr len = *plen;
e3127ae0
PB
2263 hwaddr done = 0;
2264 hwaddr l, xlat, base;
2265 MemoryRegion *mr, *this_mr;
2266 ram_addr_t raddr;
6d16c2f8 2267
e3127ae0
PB
2268 if (len == 0) {
2269 return NULL;
2270 }
38bee5dc 2271
e3127ae0
PB
2272 l = len;
2273 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2274 if (!memory_access_is_direct(mr, is_write)) {
2275 if (bounce.buffer) {
2276 return NULL;
6d16c2f8 2277 }
e85d9db5
KW
2278 /* Avoid unbounded allocations */
2279 l = MIN(l, TARGET_PAGE_SIZE);
2280 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
e3127ae0
PB
2281 bounce.addr = addr;
2282 bounce.len = l;
d3e71559
PB
2283
2284 memory_region_ref(mr);
2285 bounce.mr = mr;
e3127ae0
PB
2286 if (!is_write) {
2287 address_space_read(as, addr, bounce.buffer, l);
8ab934f9 2288 }
6d16c2f8 2289
e3127ae0
PB
2290 *plen = l;
2291 return bounce.buffer;
2292 }
2293
2294 base = xlat;
2295 raddr = memory_region_get_ram_addr(mr);
2296
2297 for (;;) {
6d16c2f8
AL
2298 len -= l;
2299 addr += l;
e3127ae0
PB
2300 done += l;
2301 if (len == 0) {
2302 break;
2303 }
2304
2305 l = len;
2306 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2307 if (this_mr != mr || xlat != base + done) {
2308 break;
2309 }
6d16c2f8 2310 }
e3127ae0 2311
d3e71559 2312 memory_region_ref(mr);
e3127ae0
PB
2313 *plen = done;
2314 return qemu_ram_ptr_length(raddr + base, plen);
6d16c2f8
AL
2315}
2316
ac1970fb 2317/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
2318 * Will also mark the memory as dirty if is_write == 1. access_len gives
2319 * the amount of memory that was actually read or written by the caller.
2320 */
a8170e5e
AK
2321void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2322 int is_write, hwaddr access_len)
6d16c2f8
AL
2323{
2324 if (buffer != bounce.buffer) {
d3e71559
PB
2325 MemoryRegion *mr;
2326 ram_addr_t addr1;
2327
2328 mr = qemu_ram_addr_from_host(buffer, &addr1);
2329 assert(mr != NULL);
6d16c2f8 2330 if (is_write) {
6d16c2f8
AL
2331 while (access_len) {
2332 unsigned l;
2333 l = TARGET_PAGE_SIZE;
2334 if (l > access_len)
2335 l = access_len;
51d7a9eb 2336 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
2337 addr1 += l;
2338 access_len -= l;
2339 }
2340 }
868bb33f 2341 if (xen_enabled()) {
e41d7c69 2342 xen_invalidate_map_cache_entry(buffer);
050a0ddf 2343 }
d3e71559 2344 memory_region_unref(mr);
6d16c2f8
AL
2345 return;
2346 }
2347 if (is_write) {
ac1970fb 2348 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 2349 }
f8a83245 2350 qemu_vfree(bounce.buffer);
6d16c2f8 2351 bounce.buffer = NULL;
d3e71559 2352 memory_region_unref(bounce.mr);
ba223c29 2353 cpu_notify_map_clients();
6d16c2f8 2354}
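
/*
 * Illustrative sketch (hypothetical usage): the canonical
 * map / access / unmap pattern for the two functions above.  Callers must
 * tolerate a shortened *plen and a NULL return (the single bounce buffer
 * may be busy); 'addr', 'want' and 'data' are made up for the example.
 */
static bool example_dma_write(AddressSpace *as, hwaddr addr,
                              hwaddr want, const uint8_t *data)
{
    hwaddr len = want;
    void *host = address_space_map(as, addr, &len, true);

    if (!host) {
        /* retry later, e.g. after cpu_register_map_client() fires */
        return false;
    }
    memcpy(host, data, len);               /* len may be smaller than want */
    address_space_unmap(as, host, len, true, len);
    return len == want;
}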
d0ecd2aa 2355
a8170e5e
AK
2356void *cpu_physical_memory_map(hwaddr addr,
2357 hwaddr *plen,
ac1970fb
AK
2358 int is_write)
2359{
2360 return address_space_map(&address_space_memory, addr, plen, is_write);
2361}
2362
a8170e5e
AK
2363void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2364 int is_write, hwaddr access_len)
ac1970fb
AK
2365{
2366 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2367}
2368
8df1cd07 2369/* warning: addr must be aligned */
fdfba1a2 2370static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2371 enum device_endian endian)
8df1cd07 2372{
8df1cd07 2373 uint8_t *ptr;
791af8c8 2374 uint64_t val;
5c8a00ce 2375 MemoryRegion *mr;
149f54b5
PB
2376 hwaddr l = 4;
2377 hwaddr addr1;
8df1cd07 2378
fdfba1a2 2379 mr = address_space_translate(as, addr, &addr1, &l, false);
5c8a00ce 2380 if (l < 4 || !memory_access_is_direct(mr, false)) {
8df1cd07 2381 /* I/O case */
5c8a00ce 2382 io_mem_read(mr, addr1, &val, 4);
1e78bcc1
AG
2383#if defined(TARGET_WORDS_BIGENDIAN)
2384 if (endian == DEVICE_LITTLE_ENDIAN) {
2385 val = bswap32(val);
2386 }
2387#else
2388 if (endian == DEVICE_BIG_ENDIAN) {
2389 val = bswap32(val);
2390 }
2391#endif
8df1cd07
FB
2392 } else {
2393 /* RAM case */
5c8a00ce 2394 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2395 & TARGET_PAGE_MASK)
149f54b5 2396 + addr1);
1e78bcc1
AG
2397 switch (endian) {
2398 case DEVICE_LITTLE_ENDIAN:
2399 val = ldl_le_p(ptr);
2400 break;
2401 case DEVICE_BIG_ENDIAN:
2402 val = ldl_be_p(ptr);
2403 break;
2404 default:
2405 val = ldl_p(ptr);
2406 break;
2407 }
8df1cd07
FB
2408 }
2409 return val;
2410}
2411
fdfba1a2 2412uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2413{
fdfba1a2 2414 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2415}
2416
fdfba1a2 2417uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2418{
fdfba1a2 2419 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2420}
2421
fdfba1a2 2422uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2423{
fdfba1a2 2424 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2425}
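
/*
 * Illustrative sketch (hypothetical usage): reading a 32-bit little-endian
 * device register at a made-up guest-physical address.  ldl_le_phys() hides
 * the host/target byte-order differences handled above.
 */
static uint32_t example_read_le_register(hwaddr reg_addr)
{
    return ldl_le_phys(&address_space_memory, reg_addr);
}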
2426
84b7b8e7 2427/* warning: addr must be aligned */
2c17449b 2428static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2429 enum device_endian endian)
84b7b8e7 2430{
84b7b8e7
FB
2431 uint8_t *ptr;
2432 uint64_t val;
5c8a00ce 2433 MemoryRegion *mr;
149f54b5
PB
2434 hwaddr l = 8;
2435 hwaddr addr1;
84b7b8e7 2436
2c17449b 2437 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2438 false);
2439 if (l < 8 || !memory_access_is_direct(mr, false)) {
84b7b8e7 2440 /* I/O case */
5c8a00ce 2441 io_mem_read(mr, addr1, &val, 8);
968a5627
PB
2442#if defined(TARGET_WORDS_BIGENDIAN)
2443 if (endian == DEVICE_LITTLE_ENDIAN) {
2444 val = bswap64(val);
2445 }
2446#else
2447 if (endian == DEVICE_BIG_ENDIAN) {
2448 val = bswap64(val);
2449 }
84b7b8e7
FB
2450#endif
2451 } else {
2452 /* RAM case */
5c8a00ce 2453 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2454 & TARGET_PAGE_MASK)
149f54b5 2455 + addr1);
1e78bcc1
AG
2456 switch (endian) {
2457 case DEVICE_LITTLE_ENDIAN:
2458 val = ldq_le_p(ptr);
2459 break;
2460 case DEVICE_BIG_ENDIAN:
2461 val = ldq_be_p(ptr);
2462 break;
2463 default:
2464 val = ldq_p(ptr);
2465 break;
2466 }
84b7b8e7
FB
2467 }
2468 return val;
2469}
2470
2c17449b 2471uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2472{
2c17449b 2473 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2474}
2475
2c17449b 2476uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2477{
2c17449b 2478 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2479}
2480
2c17449b 2481uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2482{
2c17449b 2483 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2484}
2485
aab33094 2486/* XXX: optimize */
2c17449b 2487uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
aab33094
FB
2488{
2489 uint8_t val;
2c17449b 2490 address_space_rw(as, addr, &val, 1, 0);
aab33094
FB
2491 return val;
2492}
2493
733f0b02 2494/* warning: addr must be aligned */
41701aa4 2495static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
1e78bcc1 2496 enum device_endian endian)
aab33094 2497{
733f0b02
MT
2498 uint8_t *ptr;
2499 uint64_t val;
5c8a00ce 2500 MemoryRegion *mr;
149f54b5
PB
2501 hwaddr l = 2;
2502 hwaddr addr1;
733f0b02 2503
41701aa4 2504 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2505 false);
2506 if (l < 2 || !memory_access_is_direct(mr, false)) {
733f0b02 2507 /* I/O case */
5c8a00ce 2508 io_mem_read(mr, addr1, &val, 2);
1e78bcc1
AG
2509#if defined(TARGET_WORDS_BIGENDIAN)
2510 if (endian == DEVICE_LITTLE_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#else
2514 if (endian == DEVICE_BIG_ENDIAN) {
2515 val = bswap16(val);
2516 }
2517#endif
733f0b02
MT
2518 } else {
2519 /* RAM case */
5c8a00ce 2520 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
06ef3525 2521 & TARGET_PAGE_MASK)
149f54b5 2522 + addr1);
1e78bcc1
AG
2523 switch (endian) {
2524 case DEVICE_LITTLE_ENDIAN:
2525 val = lduw_le_p(ptr);
2526 break;
2527 case DEVICE_BIG_ENDIAN:
2528 val = lduw_be_p(ptr);
2529 break;
2530 default:
2531 val = lduw_p(ptr);
2532 break;
2533 }
733f0b02
MT
2534 }
2535 return val;
aab33094
FB
2536}
2537
41701aa4 2538uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2539{
41701aa4 2540 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2541}
2542
41701aa4 2543uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2544{
41701aa4 2545 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2546}
2547
41701aa4 2548uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
1e78bcc1 2549{
41701aa4 2550 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2551}
2552
8df1cd07
FB
 2553/* warning: addr must be aligned. The RAM page is not marked as dirty
 2554   and the code inside is not invalidated. This is useful if the dirty
 2555   bits are used to track modified PTEs */
2198a121 2556void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
8df1cd07 2557{
8df1cd07 2558 uint8_t *ptr;
5c8a00ce 2559 MemoryRegion *mr;
149f54b5
PB
2560 hwaddr l = 4;
2561 hwaddr addr1;
8df1cd07 2562
2198a121 2563 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2564 true);
2565 if (l < 4 || !memory_access_is_direct(mr, true)) {
2566 io_mem_write(mr, addr1, val, 4);
8df1cd07 2567 } else {
5c8a00ce 2568 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2569 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 2570 stl_p(ptr, val);
74576198
AL
2571
2572 if (unlikely(in_migration)) {
a2cd8c85 2573 if (cpu_physical_memory_is_clean(addr1)) {
74576198
AL
2574 /* invalidate code */
2575 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2576 /* set dirty bit */
52159192
JQ
2577 cpu_physical_memory_set_dirty_flag(addr1,
2578 DIRTY_MEMORY_MIGRATION);
2579 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
74576198
AL
2580 }
2581 }
8df1cd07
FB
2582 }
2583}
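
/*
 * Illustrative sketch (hypothetical usage): how a target MMU helper might
 * set an "accessed" bit in a guest page table entry with
 * stl_phys_notdirty(), so that the bookkeeping write does not itself mark
 * the page dirty for migration/VGA.  The bit value 0x20 is made up.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20);
}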
2584
2585/* warning: addr must be aligned */
ab1da857
EI
2586static inline void stl_phys_internal(AddressSpace *as,
2587 hwaddr addr, uint32_t val,
1e78bcc1 2588 enum device_endian endian)
8df1cd07 2589{
8df1cd07 2590 uint8_t *ptr;
5c8a00ce 2591 MemoryRegion *mr;
149f54b5
PB
2592 hwaddr l = 4;
2593 hwaddr addr1;
8df1cd07 2594
ab1da857 2595 mr = address_space_translate(as, addr, &addr1, &l,
5c8a00ce
PB
2596 true);
2597 if (l < 4 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2598#if defined(TARGET_WORDS_BIGENDIAN)
2599 if (endian == DEVICE_LITTLE_ENDIAN) {
2600 val = bswap32(val);
2601 }
2602#else
2603 if (endian == DEVICE_BIG_ENDIAN) {
2604 val = bswap32(val);
2605 }
2606#endif
5c8a00ce 2607 io_mem_write(mr, addr1, val, 4);
8df1cd07 2608 } else {
8df1cd07 2609 /* RAM case */
5c8a00ce 2610 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
5579c7f3 2611 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2612 switch (endian) {
2613 case DEVICE_LITTLE_ENDIAN:
2614 stl_le_p(ptr, val);
2615 break;
2616 case DEVICE_BIG_ENDIAN:
2617 stl_be_p(ptr, val);
2618 break;
2619 default:
2620 stl_p(ptr, val);
2621 break;
2622 }
51d7a9eb 2623 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
2624 }
2625}
2626
ab1da857 2627void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2628{
ab1da857 2629 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2630}
2631
ab1da857 2632void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2633{
ab1da857 2634 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2635}
2636
ab1da857 2637void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2638{
ab1da857 2639 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2640}
2641
aab33094 2642/* XXX: optimize */
db3be60d 2643void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
aab33094
FB
2644{
2645 uint8_t v = val;
db3be60d 2646 address_space_rw(as, addr, &v, 1, 1);
aab33094
FB
2647}
2648
733f0b02 2649/* warning: addr must be aligned */
5ce5944d
EI
2650static inline void stw_phys_internal(AddressSpace *as,
2651 hwaddr addr, uint32_t val,
1e78bcc1 2652 enum device_endian endian)
aab33094 2653{
733f0b02 2654 uint8_t *ptr;
5c8a00ce 2655 MemoryRegion *mr;
149f54b5
PB
2656 hwaddr l = 2;
2657 hwaddr addr1;
733f0b02 2658
5ce5944d 2659 mr = address_space_translate(as, addr, &addr1, &l, true);
5c8a00ce 2660 if (l < 2 || !memory_access_is_direct(mr, true)) {
1e78bcc1
AG
2661#if defined(TARGET_WORDS_BIGENDIAN)
2662 if (endian == DEVICE_LITTLE_ENDIAN) {
2663 val = bswap16(val);
2664 }
2665#else
2666 if (endian == DEVICE_BIG_ENDIAN) {
2667 val = bswap16(val);
2668 }
2669#endif
5c8a00ce 2670 io_mem_write(mr, addr1, val, 2);
733f0b02 2671 } else {
733f0b02 2672 /* RAM case */
5c8a00ce 2673 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
733f0b02 2674 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
2675 switch (endian) {
2676 case DEVICE_LITTLE_ENDIAN:
2677 stw_le_p(ptr, val);
2678 break;
2679 case DEVICE_BIG_ENDIAN:
2680 stw_be_p(ptr, val);
2681 break;
2682 default:
2683 stw_p(ptr, val);
2684 break;
2685 }
51d7a9eb 2686 invalidate_and_set_dirty(addr1, 2);
733f0b02 2687 }
aab33094
FB
2688}
2689
5ce5944d 2690void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2691{
5ce5944d 2692 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
1e78bcc1
AG
2693}
2694
5ce5944d 2695void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2696{
5ce5944d 2697 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
1e78bcc1
AG
2698}
2699
5ce5944d 2700void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
1e78bcc1 2701{
5ce5944d 2702 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
1e78bcc1
AG
2703}
2704
aab33094 2705/* XXX: optimize */
f606604f 2706void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
aab33094
FB
2707{
2708 val = tswap64(val);
f606604f 2709 address_space_rw(as, addr, (void *) &val, 8, 1);
aab33094
FB
2710}
2711
f606604f 2712void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2713{
2714 val = cpu_to_le64(val);
f606604f 2715 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2716}
2717
f606604f 2718void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
1e78bcc1
AG
2719{
2720 val = cpu_to_be64(val);
f606604f 2721 address_space_rw(as, addr, (void *) &val, 8, 1);
1e78bcc1
AG
2722}
2723
5e2972fd 2724/* virtual memory access for debug (includes writing to ROM) */
f17ec444 2725int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
b448f2f3 2726 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2727{
2728 int l;
a8170e5e 2729 hwaddr phys_addr;
9b3c35e0 2730 target_ulong page;
13eb76e0
FB
2731
2732 while (len > 0) {
2733 page = addr & TARGET_PAGE_MASK;
f17ec444 2734 phys_addr = cpu_get_phys_page_debug(cpu, page);
13eb76e0
FB
2735 /* if no physical page mapped, return an error */
2736 if (phys_addr == -1)
2737 return -1;
2738 l = (page + TARGET_PAGE_SIZE) - addr;
2739 if (l > len)
2740 l = len;
5e2972fd 2741 phys_addr += (addr & ~TARGET_PAGE_MASK);
2e38847b
EI
2742 if (is_write) {
2743 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2744 } else {
2745 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2746 }
13eb76e0
FB
2747 len -= l;
2748 buf += l;
2749 addr += l;
2750 }
2751 return 0;
2752}
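
/*
 * Illustrative sketch (hypothetical usage): how a debugger front end such as
 * the gdbstub might peek at guest virtual memory through the helper above.
 * The bytes are returned exactly as stored, with no byte swapping.
 */
static int example_debug_peek_u32(CPUState *cpu, target_ulong vaddr,
                                  uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out, 4, 0);
}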
a68fe89c 2753#endif
13eb76e0 2754
8e4a424b
BS
2755#if !defined(CONFIG_USER_ONLY)
2756
2757/*
 2758 * A helper function for the _utterly broken_ virtio device model to find out
 2759 * whether it is running on a big-endian machine. Don't do this at home, kids!
2760 */
2761bool virtio_is_big_endian(void);
2762bool virtio_is_big_endian(void)
2763{
2764#if defined(TARGET_WORDS_BIGENDIAN)
2765 return true;
2766#else
2767 return false;
2768#endif
2769}
2770
2771#endif
2772
76f35538 2773#ifndef CONFIG_USER_ONLY
a8170e5e 2774bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538 2775{
5c8a00ce 2776    MemoryRegion *mr;
149f54b5 2777 hwaddr l = 1;
76f35538 2778
5c8a00ce
PB
2779 mr = address_space_translate(&address_space_memory,
2780 phys_addr, &phys_addr, &l, false);
76f35538 2781
5c8a00ce
PB
2782 return !(memory_region_is_ram(mr) ||
2783 memory_region_is_romd(mr));
76f35538 2784}
bd2fa51f
MH
2785
2786void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2787{
2788 RAMBlock *block;
2789
2790 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2791 func(block->host, block->offset, block->length, opaque);
2792 }
2793}
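
/*
 * Illustrative sketch (hypothetical usage): a callback for
 * qemu_ram_foreach_block() that adds up the length of every RAM block.  The
 * parameter list mirrors the arguments passed in the loop above; the exact
 * RAMBlockIterFunc typedef lives in the headers.
 */
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram_bytes(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}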
ec3f8c99 2794#endif