]>
Commit | Line | Data |
---|---|---|
54936004 | 1 | /* |
5b6dd868 | 2 | * Virtual page mapping |
5fafdf24 | 3 | * |
54936004 FB |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
8167ee88 | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
54936004 | 18 | */ |
7b31bbc2 | 19 | #include "qemu/osdep.h" |
da34e65c | 20 | #include "qapi/error.h" |
777872e5 | 21 | #ifndef _WIN32 |
d5a8f07c | 22 | #endif |
54936004 | 23 | |
f348b6d1 | 24 | #include "qemu/cutils.h" |
6180a181 | 25 | #include "cpu.h" |
63c91552 | 26 | #include "exec/exec-all.h" |
51180423 | 27 | #include "exec/target_page.h" |
b67d9a52 | 28 | #include "tcg.h" |
741da0d3 | 29 | #include "hw/qdev-core.h" |
c7e002c5 | 30 | #include "hw/qdev-properties.h" |
4485bd26 | 31 | #if !defined(CONFIG_USER_ONLY) |
47c8ca53 | 32 | #include "hw/boards.h" |
33c11879 | 33 | #include "hw/xen/xen.h" |
4485bd26 | 34 | #endif |
9c17d615 | 35 | #include "sysemu/kvm.h" |
2ff3de68 | 36 | #include "sysemu/sysemu.h" |
1de7afc9 PB |
37 | #include "qemu/timer.h" |
38 | #include "qemu/config-file.h" | |
75a34036 | 39 | #include "qemu/error-report.h" |
53a5960a | 40 | #if defined(CONFIG_USER_ONLY) |
a9c94277 | 41 | #include "qemu.h" |
432d268c | 42 | #else /* !CONFIG_USER_ONLY */ |
741da0d3 PB |
43 | #include "hw/hw.h" |
44 | #include "exec/memory.h" | |
df43d49c | 45 | #include "exec/ioport.h" |
741da0d3 | 46 | #include "sysemu/dma.h" |
9c607668 | 47 | #include "sysemu/numa.h" |
79ca7a1b | 48 | #include "sysemu/hw_accel.h" |
741da0d3 | 49 | #include "exec/address-spaces.h" |
9c17d615 | 50 | #include "sysemu/xen-mapcache.h" |
0ab8ed18 | 51 | #include "trace-root.h" |
d3a5038c | 52 | |
e2fa71f5 DDAG |
53 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE |
54 | #include <fcntl.h> | |
55 | #include <linux/falloc.h> | |
56 | #endif | |
57 | ||
53a5960a | 58 | #endif |
0d6d3c87 | 59 | #include "exec/cpu-all.h" |
0dc3f44a | 60 | #include "qemu/rcu_queue.h" |
4840f10e | 61 | #include "qemu/main-loop.h" |
5b6dd868 | 62 | #include "translate-all.h" |
7615936e | 63 | #include "sysemu/replay.h" |
0cac1b66 | 64 | |
022c62cb | 65 | #include "exec/memory-internal.h" |
220c3ebd | 66 | #include "exec/ram_addr.h" |
508127e2 | 67 | #include "exec/log.h" |
67d95c15 | 68 | |
9dfeca7c BR |
69 | #include "migration/vmstate.h" |
70 | ||
b35ba30f | 71 | #include "qemu/range.h" |
794e8f30 MT |
72 | #ifndef _WIN32 |
73 | #include "qemu/mmap-alloc.h" | |
74 | #endif | |
b35ba30f | 75 | |
be9b23c4 PX |
76 | #include "monitor/monitor.h" |
77 | ||
db7b5426 | 78 | //#define DEBUG_SUBPAGE |
1196be37 | 79 | |
e2eef170 | 80 | #if !defined(CONFIG_USER_ONLY) |
0dc3f44a MD |
81 | /* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes |
82 | * are protected by the ramlist lock. | |
83 | */ | |
0d53d9fe | 84 | RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; |
62152b8a AK |
85 | |
86 | static MemoryRegion *system_memory; | |
309cb471 | 87 | static MemoryRegion *system_io; |
62152b8a | 88 | |
f6790af6 AK |
89 | AddressSpace address_space_io; |
90 | AddressSpace address_space_memory; | |
2673a5da | 91 | |
0844e007 | 92 | MemoryRegion io_mem_rom, io_mem_notdirty; |
acc9d80b | 93 | static MemoryRegion io_mem_unassigned; |
0e0df1e2 | 94 | |
7bd4f430 PB |
95 | /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ |
96 | #define RAM_PREALLOC (1 << 0) | |
97 | ||
dbcb8981 PB |
98 | /* RAM is mmap-ed with MAP_SHARED */ |
99 | #define RAM_SHARED (1 << 1) | |
100 | ||
62be4e3a MT |
101 | /* Only a portion of RAM (used_length) is actually used, and migrated. |
102 | * This used_length size can change across reboots. | |
103 | */ | |
104 | #define RAM_RESIZEABLE (1 << 2) | |
105 | ||
e2eef170 | 106 | #endif |
9fa3e853 | 107 | |
20bccb82 PM |
108 | #ifdef TARGET_PAGE_BITS_VARY |
109 | int target_page_bits; | |
110 | bool target_page_bits_decided; | |
111 | #endif | |
112 | ||
bdc44640 | 113 | struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); |
6a00d601 FB |
114 | /* current CPU in the current thread. It is only valid inside |
115 | cpu_exec() */ | |
f240eb6f | 116 | __thread CPUState *current_cpu; |
2e70f6ef | 117 | /* 0 = Do not count executed instructions. |
bf20dc07 | 118 | 1 = Precise instruction counting. |
2e70f6ef | 119 | 2 = Adaptive rate instruction counting. */ |
5708fc66 | 120 | int use_icount; |
6a00d601 | 121 | |
a0be0c58 YZ |
122 | uintptr_t qemu_host_page_size; |
123 | intptr_t qemu_host_page_mask; | |
124 | uintptr_t qemu_real_host_page_size; | |
125 | intptr_t qemu_real_host_page_mask; | |
126 | ||
20bccb82 PM |
127 | bool set_preferred_target_page_bits(int bits) |
128 | { | |
129 | /* The target page size is the lowest common denominator for all | |
130 | * the CPUs in the system, so we can only make it smaller, never | |
131 | * larger. And we can't make it smaller once we've committed to | |
132 | * a particular size. | |
133 | */ | |
134 | #ifdef TARGET_PAGE_BITS_VARY | |
135 | assert(bits >= TARGET_PAGE_BITS_MIN); | |
136 | if (target_page_bits == 0 || target_page_bits > bits) { | |
137 | if (target_page_bits_decided) { | |
138 | return false; | |
139 | } | |
140 | target_page_bits = bits; | |
141 | } | |
142 | #endif | |
143 | return true; | |
144 | } | |
145 | ||
e2eef170 | 146 | #if !defined(CONFIG_USER_ONLY) |
4346ae3e | 147 | |
20bccb82 PM |
148 | static void finalize_target_page_bits(void) |
149 | { | |
150 | #ifdef TARGET_PAGE_BITS_VARY | |
151 | if (target_page_bits == 0) { | |
152 | target_page_bits = TARGET_PAGE_BITS_MIN; | |
153 | } | |
154 | target_page_bits_decided = true; | |
155 | #endif | |
156 | } | |
157 | ||
1db8abb1 PB |
158 | typedef struct PhysPageEntry PhysPageEntry; |
159 | ||
160 | struct PhysPageEntry { | |
9736e55b | 161 | /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */ |
8b795765 | 162 | uint32_t skip : 6; |
9736e55b | 163 | /* index into phys_sections (!skip) or phys_map_nodes (skip) */ |
8b795765 | 164 | uint32_t ptr : 26; |
1db8abb1 PB |
165 | }; |
166 | ||
8b795765 MT |
167 | #define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6) |
168 | ||
03f49957 | 169 | /* Size of the L2 (and L3, etc) page tables. */ |
57271d63 | 170 | #define ADDR_SPACE_BITS 64 |
03f49957 | 171 | |
026736ce | 172 | #define P_L2_BITS 9 |
03f49957 PB |
173 | #define P_L2_SIZE (1 << P_L2_BITS) |
174 | ||
175 | #define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1) | |
176 | ||
177 | typedef PhysPageEntry Node[P_L2_SIZE]; | |
0475d94f | 178 | |
53cb28cb | 179 | typedef struct PhysPageMap { |
79e2b9ae PB |
180 | struct rcu_head rcu; |
181 | ||
53cb28cb MA |
182 | unsigned sections_nb; |
183 | unsigned sections_nb_alloc; | |
184 | unsigned nodes_nb; | |
185 | unsigned nodes_nb_alloc; | |
186 | Node *nodes; | |
187 | MemoryRegionSection *sections; | |
188 | } PhysPageMap; | |
189 | ||
1db8abb1 | 190 | struct AddressSpaceDispatch { |
79e2b9ae PB |
191 | struct rcu_head rcu; |
192 | ||
729633c2 | 193 | MemoryRegionSection *mru_section; |
1db8abb1 PB |
194 | /* This is a multi-level map on the physical address space. |
195 | * The bottom level has pointers to MemoryRegionSections. | |
196 | */ | |
197 | PhysPageEntry phys_map; | |
53cb28cb | 198 | PhysPageMap map; |
acc9d80b | 199 | AddressSpace *as; |
1db8abb1 PB |
200 | }; |
201 | ||
90260c6c JK |
202 | #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) |
203 | typedef struct subpage_t { | |
204 | MemoryRegion iomem; | |
acc9d80b | 205 | AddressSpace *as; |
90260c6c | 206 | hwaddr base; |
2615fabd | 207 | uint16_t sub_section[]; |
90260c6c JK |
208 | } subpage_t; |
209 | ||
b41aac4f LPF |
210 | #define PHYS_SECTION_UNASSIGNED 0 |
211 | #define PHYS_SECTION_NOTDIRTY 1 | |
212 | #define PHYS_SECTION_ROM 2 | |
213 | #define PHYS_SECTION_WATCH 3 | |
5312bd8b | 214 | |
e2eef170 | 215 | static void io_mem_init(void); |
62152b8a | 216 | static void memory_map_init(void); |
09daed84 | 217 | static void tcg_commit(MemoryListener *listener); |
e2eef170 | 218 | |
1ec9b909 | 219 | static MemoryRegion io_mem_watch; |
32857f4d PM |
220 | |
221 | /** | |
222 | * CPUAddressSpace: all the information a CPU needs about an AddressSpace | |
223 | * @cpu: the CPU whose AddressSpace this is | |
224 | * @as: the AddressSpace itself | |
225 | * @memory_dispatch: its dispatch pointer (cached, RCU protected) | |
226 | * @tcg_as_listener: listener for tracking changes to the AddressSpace | |
227 | */ | |
228 | struct CPUAddressSpace { | |
229 | CPUState *cpu; | |
230 | AddressSpace *as; | |
231 | struct AddressSpaceDispatch *memory_dispatch; | |
232 | MemoryListener tcg_as_listener; | |
233 | }; | |
234 | ||
8deaf12c GH |
235 | struct DirtyBitmapSnapshot { |
236 | ram_addr_t start; | |
237 | ram_addr_t end; | |
238 | unsigned long dirty[]; | |
239 | }; | |
240 | ||
6658ffb8 | 241 | #endif |
fd6ce8f6 | 242 | |
6d9a1304 | 243 | #if !defined(CONFIG_USER_ONLY) |
d6f2ea22 | 244 | |
53cb28cb | 245 | static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes) |
d6f2ea22 | 246 | { |
101420b8 | 247 | static unsigned alloc_hint = 16; |
53cb28cb | 248 | if (map->nodes_nb + nodes > map->nodes_nb_alloc) { |
101420b8 | 249 | map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint); |
53cb28cb MA |
250 | map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes); |
251 | map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc); | |
101420b8 | 252 | alloc_hint = map->nodes_nb_alloc; |
d6f2ea22 | 253 | } |
f7bf5461 AK |
254 | } |
255 | ||
db94604b | 256 | static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf) |
f7bf5461 AK |
257 | { |
258 | unsigned i; | |
8b795765 | 259 | uint32_t ret; |
db94604b PB |
260 | PhysPageEntry e; |
261 | PhysPageEntry *p; | |
f7bf5461 | 262 | |
53cb28cb | 263 | ret = map->nodes_nb++; |
db94604b | 264 | p = map->nodes[ret]; |
f7bf5461 | 265 | assert(ret != PHYS_MAP_NODE_NIL); |
53cb28cb | 266 | assert(ret != map->nodes_nb_alloc); |
db94604b PB |
267 | |
268 | e.skip = leaf ? 0 : 1; | |
269 | e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL; | |
03f49957 | 270 | for (i = 0; i < P_L2_SIZE; ++i) { |
db94604b | 271 | memcpy(&p[i], &e, sizeof(e)); |
d6f2ea22 | 272 | } |
f7bf5461 | 273 | return ret; |
d6f2ea22 AK |
274 | } |
275 | ||
53cb28cb MA |
276 | static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp, |
277 | hwaddr *index, hwaddr *nb, uint16_t leaf, | |
2999097b | 278 | int level) |
f7bf5461 AK |
279 | { |
280 | PhysPageEntry *p; | |
03f49957 | 281 | hwaddr step = (hwaddr)1 << (level * P_L2_BITS); |
108c49b8 | 282 | |
9736e55b | 283 | if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) { |
db94604b | 284 | lp->ptr = phys_map_node_alloc(map, level == 0); |
92e873b9 | 285 | } |
db94604b | 286 | p = map->nodes[lp->ptr]; |
03f49957 | 287 | lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)]; |
f7bf5461 | 288 | |
03f49957 | 289 | while (*nb && lp < &p[P_L2_SIZE]) { |
07f07b31 | 290 | if ((*index & (step - 1)) == 0 && *nb >= step) { |
9736e55b | 291 | lp->skip = 0; |
c19e8800 | 292 | lp->ptr = leaf; |
07f07b31 AK |
293 | *index += step; |
294 | *nb -= step; | |
2999097b | 295 | } else { |
53cb28cb | 296 | phys_page_set_level(map, lp, index, nb, leaf, level - 1); |
2999097b AK |
297 | } |
298 | ++lp; | |
f7bf5461 AK |
299 | } |
300 | } | |
301 | ||
ac1970fb | 302 | static void phys_page_set(AddressSpaceDispatch *d, |
a8170e5e | 303 | hwaddr index, hwaddr nb, |
2999097b | 304 | uint16_t leaf) |
f7bf5461 | 305 | { |
2999097b | 306 | /* Wildly overreserve - it doesn't matter much. */ |
53cb28cb | 307 | phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS); |
5cd2c5b6 | 308 | |
53cb28cb | 309 | phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1); |
92e873b9 FB |
310 | } |
311 | ||
b35ba30f MT |
312 | /* Compact a non leaf page entry. Simply detect that the entry has a single child, |
313 | * and update our entry so we can skip it and go directly to the destination. | |
314 | */ | |
efee678d | 315 | static void phys_page_compact(PhysPageEntry *lp, Node *nodes) |
b35ba30f MT |
316 | { |
317 | unsigned valid_ptr = P_L2_SIZE; | |
318 | int valid = 0; | |
319 | PhysPageEntry *p; | |
320 | int i; | |
321 | ||
322 | if (lp->ptr == PHYS_MAP_NODE_NIL) { | |
323 | return; | |
324 | } | |
325 | ||
326 | p = nodes[lp->ptr]; | |
327 | for (i = 0; i < P_L2_SIZE; i++) { | |
328 | if (p[i].ptr == PHYS_MAP_NODE_NIL) { | |
329 | continue; | |
330 | } | |
331 | ||
332 | valid_ptr = i; | |
333 | valid++; | |
334 | if (p[i].skip) { | |
efee678d | 335 | phys_page_compact(&p[i], nodes); |
b35ba30f MT |
336 | } |
337 | } | |
338 | ||
339 | /* We can only compress if there's only one child. */ | |
340 | if (valid != 1) { | |
341 | return; | |
342 | } | |
343 | ||
344 | assert(valid_ptr < P_L2_SIZE); | |
345 | ||
346 | /* Don't compress if it won't fit in the # of bits we have. */ | |
347 | if (lp->skip + p[valid_ptr].skip >= (1 << 3)) { | |
348 | return; | |
349 | } | |
350 | ||
351 | lp->ptr = p[valid_ptr].ptr; | |
352 | if (!p[valid_ptr].skip) { | |
353 | /* If our only child is a leaf, make this a leaf. */ | |
354 | /* By design, we should have made this node a leaf to begin with so we | |
355 | * should never reach here. | |
356 | * But since it's so simple to handle this, let's do it just in case we | |
357 | * change this rule. | |
358 | */ | |
359 | lp->skip = 0; | |
360 | } else { | |
361 | lp->skip += p[valid_ptr].skip; | |
362 | } | |
363 | } | |
364 | ||
365 | static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb) | |
366 | { | |
b35ba30f | 367 | if (d->phys_map.skip) { |
efee678d | 368 | phys_page_compact(&d->phys_map, d->map.nodes); |
b35ba30f MT |
369 | } |
370 | } | |
371 | ||
29cb533d FZ |
372 | static inline bool section_covers_addr(const MemoryRegionSection *section, |
373 | hwaddr addr) | |
374 | { | |
375 | /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means | |
376 | * the section must cover the entire address space. | |
377 | */ | |
258dfaaa | 378 | return int128_gethi(section->size) || |
29cb533d | 379 | range_covers_byte(section->offset_within_address_space, |
258dfaaa | 380 | int128_getlo(section->size), addr); |
29cb533d FZ |
381 | } |
382 | ||
003a0cf2 | 383 | static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr) |
92e873b9 | 384 | { |
003a0cf2 PX |
385 | PhysPageEntry lp = d->phys_map, *p; |
386 | Node *nodes = d->map.nodes; | |
387 | MemoryRegionSection *sections = d->map.sections; | |
97115a8d | 388 | hwaddr index = addr >> TARGET_PAGE_BITS; |
31ab2b4a | 389 | int i; |
f1f6e3b8 | 390 | |
9736e55b | 391 | for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) { |
c19e8800 | 392 | if (lp.ptr == PHYS_MAP_NODE_NIL) { |
9affd6fc | 393 | return §ions[PHYS_SECTION_UNASSIGNED]; |
31ab2b4a | 394 | } |
9affd6fc | 395 | p = nodes[lp.ptr]; |
03f49957 | 396 | lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)]; |
5312bd8b | 397 | } |
b35ba30f | 398 | |
29cb533d | 399 | if (section_covers_addr(§ions[lp.ptr], addr)) { |
b35ba30f MT |
400 | return §ions[lp.ptr]; |
401 | } else { | |
402 | return §ions[PHYS_SECTION_UNASSIGNED]; | |
403 | } | |
f3705d53 AK |
404 | } |
405 | ||
e5548617 BS |
406 | bool memory_region_is_unassigned(MemoryRegion *mr) |
407 | { | |
2a8e7499 | 408 | return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device |
5b6dd868 | 409 | && mr != &io_mem_watch; |
fd6ce8f6 | 410 | } |
149f54b5 | 411 | |
79e2b9ae | 412 | /* Called from RCU critical section */ |
c7086b4a | 413 | static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d, |
90260c6c JK |
414 | hwaddr addr, |
415 | bool resolve_subpage) | |
9f029603 | 416 | { |
729633c2 | 417 | MemoryRegionSection *section = atomic_read(&d->mru_section); |
90260c6c | 418 | subpage_t *subpage; |
729633c2 | 419 | bool update; |
90260c6c | 420 | |
729633c2 FZ |
421 | if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] && |
422 | section_covers_addr(section, addr)) { | |
423 | update = false; | |
424 | } else { | |
003a0cf2 | 425 | section = phys_page_find(d, addr); |
729633c2 FZ |
426 | update = true; |
427 | } | |
90260c6c JK |
428 | if (resolve_subpage && section->mr->subpage) { |
429 | subpage = container_of(section->mr, subpage_t, iomem); | |
53cb28cb | 430 | section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]]; |
90260c6c | 431 | } |
729633c2 FZ |
432 | if (update) { |
433 | atomic_set(&d->mru_section, section); | |
434 | } | |
90260c6c | 435 | return section; |
9f029603 JK |
436 | } |
437 | ||
79e2b9ae | 438 | /* Called from RCU critical section */ |
90260c6c | 439 | static MemoryRegionSection * |
c7086b4a | 440 | address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat, |
90260c6c | 441 | hwaddr *plen, bool resolve_subpage) |
149f54b5 PB |
442 | { |
443 | MemoryRegionSection *section; | |
965eb2fc | 444 | MemoryRegion *mr; |
a87f3954 | 445 | Int128 diff; |
149f54b5 | 446 | |
c7086b4a | 447 | section = address_space_lookup_region(d, addr, resolve_subpage); |
149f54b5 PB |
448 | /* Compute offset within MemoryRegionSection */ |
449 | addr -= section->offset_within_address_space; | |
450 | ||
451 | /* Compute offset within MemoryRegion */ | |
452 | *xlat = addr + section->offset_within_region; | |
453 | ||
965eb2fc | 454 | mr = section->mr; |
b242e0e0 PB |
455 | |
456 | /* MMIO registers can be expected to perform full-width accesses based only | |
457 | * on their address, without considering adjacent registers that could | |
458 | * decode to completely different MemoryRegions. When such registers | |
459 | * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO | |
460 | * regions overlap wildly. For this reason we cannot clamp the accesses | |
461 | * here. | |
462 | * | |
463 | * If the length is small (as is the case for address_space_ldl/stl), | |
464 | * everything works fine. If the incoming length is large, however, | |
465 | * the caller really has to do the clamping through memory_access_size. | |
466 | */ | |
965eb2fc | 467 | if (memory_region_is_ram(mr)) { |
e4a511f8 | 468 | diff = int128_sub(section->size, int128_make64(addr)); |
965eb2fc PB |
469 | *plen = int128_get64(int128_min(diff, int128_make64(*plen))); |
470 | } | |
149f54b5 PB |
471 | return section; |
472 | } | |
90260c6c | 473 | |
41063e1e | 474 | /* Called from RCU critical section */ |
a764040c PX |
475 | static MemoryRegionSection address_space_do_translate(AddressSpace *as, |
476 | hwaddr addr, | |
477 | hwaddr *xlat, | |
478 | hwaddr *plen, | |
479 | bool is_write, | |
480 | bool is_mmio) | |
052c8fa9 | 481 | { |
a764040c | 482 | IOMMUTLBEntry iotlb; |
052c8fa9 | 483 | MemoryRegionSection *section; |
3df9d748 | 484 | IOMMUMemoryRegion *iommu_mr; |
1221a474 | 485 | IOMMUMemoryRegionClass *imrc; |
052c8fa9 JW |
486 | |
487 | for (;;) { | |
488 | AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch); | |
a764040c | 489 | section = address_space_translate_internal(d, addr, &addr, plen, is_mmio); |
052c8fa9 | 490 | |
3df9d748 AK |
491 | iommu_mr = memory_region_get_iommu(section->mr); |
492 | if (!iommu_mr) { | |
052c8fa9 JW |
493 | break; |
494 | } | |
1221a474 | 495 | imrc = memory_region_get_iommu_class_nocheck(iommu_mr); |
052c8fa9 | 496 | |
1221a474 AK |
497 | iotlb = imrc->translate(iommu_mr, addr, is_write ? |
498 | IOMMU_WO : IOMMU_RO); | |
a764040c PX |
499 | addr = ((iotlb.translated_addr & ~iotlb.addr_mask) |
500 | | (addr & iotlb.addr_mask)); | |
501 | *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1); | |
052c8fa9 | 502 | if (!(iotlb.perm & (1 << is_write))) { |
a764040c | 503 | goto translate_fail; |
052c8fa9 JW |
504 | } |
505 | ||
052c8fa9 JW |
506 | as = iotlb.target_as; |
507 | } | |
508 | ||
a764040c PX |
509 | *xlat = addr; |
510 | ||
511 | return *section; | |
512 | ||
513 | translate_fail: | |
514 | return (MemoryRegionSection) { .mr = &io_mem_unassigned }; | |
052c8fa9 JW |
515 | } |
516 | ||
517 | /* Called from RCU critical section */ | |
a764040c PX |
518 | IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, |
519 | bool is_write) | |
90260c6c | 520 | { |
a764040c PX |
521 | MemoryRegionSection section; |
522 | hwaddr xlat, plen; | |
30951157 | 523 | |
a764040c PX |
524 | /* Try to get maximum page mask during translation. */ |
525 | plen = (hwaddr)-1; | |
30951157 | 526 | |
a764040c PX |
527 | /* This can never be MMIO. */ |
528 | section = address_space_do_translate(as, addr, &xlat, &plen, | |
529 | is_write, false); | |
30951157 | 530 | |
a764040c PX |
531 | /* Illegal translation */ |
532 | if (section.mr == &io_mem_unassigned) { | |
533 | goto iotlb_fail; | |
534 | } | |
30951157 | 535 | |
a764040c PX |
536 | /* Convert memory region offset into address space offset */ |
537 | xlat += section.offset_within_address_space - | |
538 | section.offset_within_region; | |
539 | ||
540 | if (plen == (hwaddr)-1) { | |
541 | /* | |
542 | * We use default page size here. Logically it only happens | |
543 | * for identity mappings. | |
544 | */ | |
545 | plen = TARGET_PAGE_SIZE; | |
30951157 AK |
546 | } |
547 | ||
a764040c PX |
548 | /* Convert to address mask */ |
549 | plen -= 1; | |
550 | ||
551 | return (IOMMUTLBEntry) { | |
552 | .target_as = section.address_space, | |
553 | .iova = addr & ~plen, | |
554 | .translated_addr = xlat & ~plen, | |
555 | .addr_mask = plen, | |
556 | /* IOTLBs are for DMAs, and DMA only allows on RAMs. */ | |
557 | .perm = IOMMU_RW, | |
558 | }; | |
559 | ||
560 | iotlb_fail: | |
561 | return (IOMMUTLBEntry) {0}; | |
562 | } | |
563 | ||
564 | /* Called from RCU critical section */ | |
565 | MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr, | |
566 | hwaddr *xlat, hwaddr *plen, | |
567 | bool is_write) | |
568 | { | |
569 | MemoryRegion *mr; | |
570 | MemoryRegionSection section; | |
571 | ||
572 | /* This can be MMIO, so setup MMIO bit. */ | |
573 | section = address_space_do_translate(as, addr, xlat, plen, is_write, true); | |
574 | mr = section.mr; | |
575 | ||
fe680d0d | 576 | if (xen_enabled() && memory_access_is_direct(mr, is_write)) { |
a87f3954 | 577 | hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr; |
23820dbf | 578 | *plen = MIN(page, *plen); |
a87f3954 PB |
579 | } |
580 | ||
30951157 | 581 | return mr; |
90260c6c JK |
582 | } |
583 | ||
79e2b9ae | 584 | /* Called from RCU critical section */ |
90260c6c | 585 | MemoryRegionSection * |
d7898cda | 586 | address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, |
9d82b5a7 | 587 | hwaddr *xlat, hwaddr *plen) |
90260c6c | 588 | { |
30951157 | 589 | MemoryRegionSection *section; |
f35e44e7 | 590 | AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); |
d7898cda PM |
591 | |
592 | section = address_space_translate_internal(d, addr, xlat, plen, false); | |
30951157 | 593 | |
3df9d748 | 594 | assert(!memory_region_is_iommu(section->mr)); |
30951157 | 595 | return section; |
90260c6c | 596 | } |
5b6dd868 | 597 | #endif |
fd6ce8f6 | 598 | |
b170fce3 | 599 | #if !defined(CONFIG_USER_ONLY) |
5b6dd868 BS |
600 | |
601 | static int cpu_common_post_load(void *opaque, int version_id) | |
fd6ce8f6 | 602 | { |
259186a7 | 603 | CPUState *cpu = opaque; |
a513fe19 | 604 | |
5b6dd868 BS |
605 | /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the |
606 | version_id is increased. */ | |
259186a7 | 607 | cpu->interrupt_request &= ~0x01; |
d10eb08f | 608 | tlb_flush(cpu); |
5b6dd868 BS |
609 | |
610 | return 0; | |
a513fe19 | 611 | } |
7501267e | 612 | |
6c3bff0e PD |
613 | static int cpu_common_pre_load(void *opaque) |
614 | { | |
615 | CPUState *cpu = opaque; | |
616 | ||
adee6424 | 617 | cpu->exception_index = -1; |
6c3bff0e PD |
618 | |
619 | return 0; | |
620 | } | |
621 | ||
622 | static bool cpu_common_exception_index_needed(void *opaque) | |
623 | { | |
624 | CPUState *cpu = opaque; | |
625 | ||
adee6424 | 626 | return tcg_enabled() && cpu->exception_index != -1; |
6c3bff0e PD |
627 | } |
628 | ||
629 | static const VMStateDescription vmstate_cpu_common_exception_index = { | |
630 | .name = "cpu_common/exception_index", | |
631 | .version_id = 1, | |
632 | .minimum_version_id = 1, | |
5cd8cada | 633 | .needed = cpu_common_exception_index_needed, |
6c3bff0e PD |
634 | .fields = (VMStateField[]) { |
635 | VMSTATE_INT32(exception_index, CPUState), | |
636 | VMSTATE_END_OF_LIST() | |
637 | } | |
638 | }; | |
639 | ||
bac05aa9 AS |
640 | static bool cpu_common_crash_occurred_needed(void *opaque) |
641 | { | |
642 | CPUState *cpu = opaque; | |
643 | ||
644 | return cpu->crash_occurred; | |
645 | } | |
646 | ||
647 | static const VMStateDescription vmstate_cpu_common_crash_occurred = { | |
648 | .name = "cpu_common/crash_occurred", | |
649 | .version_id = 1, | |
650 | .minimum_version_id = 1, | |
651 | .needed = cpu_common_crash_occurred_needed, | |
652 | .fields = (VMStateField[]) { | |
653 | VMSTATE_BOOL(crash_occurred, CPUState), | |
654 | VMSTATE_END_OF_LIST() | |
655 | } | |
656 | }; | |
657 | ||
1a1562f5 | 658 | const VMStateDescription vmstate_cpu_common = { |
5b6dd868 BS |
659 | .name = "cpu_common", |
660 | .version_id = 1, | |
661 | .minimum_version_id = 1, | |
6c3bff0e | 662 | .pre_load = cpu_common_pre_load, |
5b6dd868 | 663 | .post_load = cpu_common_post_load, |
35d08458 | 664 | .fields = (VMStateField[]) { |
259186a7 AF |
665 | VMSTATE_UINT32(halted, CPUState), |
666 | VMSTATE_UINT32(interrupt_request, CPUState), | |
5b6dd868 | 667 | VMSTATE_END_OF_LIST() |
6c3bff0e | 668 | }, |
5cd8cada JQ |
669 | .subsections = (const VMStateDescription*[]) { |
670 | &vmstate_cpu_common_exception_index, | |
bac05aa9 | 671 | &vmstate_cpu_common_crash_occurred, |
5cd8cada | 672 | NULL |
5b6dd868 BS |
673 | } |
674 | }; | |
1a1562f5 | 675 | |
5b6dd868 | 676 | #endif |
ea041c0e | 677 | |
38d8f5c8 | 678 | CPUState *qemu_get_cpu(int index) |
ea041c0e | 679 | { |
bdc44640 | 680 | CPUState *cpu; |
ea041c0e | 681 | |
bdc44640 | 682 | CPU_FOREACH(cpu) { |
55e5c285 | 683 | if (cpu->cpu_index == index) { |
bdc44640 | 684 | return cpu; |
55e5c285 | 685 | } |
ea041c0e | 686 | } |
5b6dd868 | 687 | |
bdc44640 | 688 | return NULL; |
ea041c0e FB |
689 | } |
690 | ||
09daed84 | 691 | #if !defined(CONFIG_USER_ONLY) |
56943e8c | 692 | void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx) |
09daed84 | 693 | { |
12ebc9a7 PM |
694 | CPUAddressSpace *newas; |
695 | ||
696 | /* Target code should have set num_ases before calling us */ | |
697 | assert(asidx < cpu->num_ases); | |
698 | ||
56943e8c PM |
699 | if (asidx == 0) { |
700 | /* address space 0 gets the convenience alias */ | |
701 | cpu->as = as; | |
702 | } | |
703 | ||
12ebc9a7 PM |
704 | /* KVM cannot currently support multiple address spaces. */ |
705 | assert(asidx == 0 || !kvm_enabled()); | |
09daed84 | 706 | |
12ebc9a7 PM |
707 | if (!cpu->cpu_ases) { |
708 | cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); | |
09daed84 | 709 | } |
32857f4d | 710 | |
12ebc9a7 PM |
711 | newas = &cpu->cpu_ases[asidx]; |
712 | newas->cpu = cpu; | |
713 | newas->as = as; | |
56943e8c | 714 | if (tcg_enabled()) { |
12ebc9a7 PM |
715 | newas->tcg_as_listener.commit = tcg_commit; |
716 | memory_listener_register(&newas->tcg_as_listener, as); | |
56943e8c | 717 | } |
09daed84 | 718 | } |
651a5bc0 PM |
719 | |
720 | AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) | |
721 | { | |
722 | /* Return the AddressSpace corresponding to the specified index */ | |
723 | return cpu->cpu_ases[asidx].as; | |
724 | } | |
09daed84 EI |
725 | #endif |
726 | ||
7bbc124e | 727 | void cpu_exec_unrealizefn(CPUState *cpu) |
1c59eb39 | 728 | { |
9dfeca7c BR |
729 | CPUClass *cc = CPU_GET_CLASS(cpu); |
730 | ||
267f685b | 731 | cpu_list_remove(cpu); |
9dfeca7c BR |
732 | |
733 | if (cc->vmsd != NULL) { | |
734 | vmstate_unregister(NULL, cc->vmsd, cpu); | |
735 | } | |
736 | if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { | |
737 | vmstate_unregister(NULL, &vmstate_cpu_common, cpu); | |
738 | } | |
1c59eb39 BR |
739 | } |
740 | ||
c7e002c5 FZ |
741 | Property cpu_common_props[] = { |
742 | #ifndef CONFIG_USER_ONLY | |
743 | /* Create a memory property for softmmu CPU object, | |
744 | * so users can wire up its memory. (This can't go in qom/cpu.c | |
745 | * because that file is compiled only once for both user-mode | |
746 | * and system builds.) The default if no link is set up is to use | |
747 | * the system address space. | |
748 | */ | |
749 | DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, | |
750 | MemoryRegion *), | |
751 | #endif | |
752 | DEFINE_PROP_END_OF_LIST(), | |
753 | }; | |
754 | ||
39e329e3 | 755 | void cpu_exec_initfn(CPUState *cpu) |
ea041c0e | 756 | { |
56943e8c | 757 | cpu->as = NULL; |
12ebc9a7 | 758 | cpu->num_ases = 0; |
56943e8c | 759 | |
291135b5 | 760 | #ifndef CONFIG_USER_ONLY |
291135b5 | 761 | cpu->thread_id = qemu_get_thread_id(); |
6731d864 PC |
762 | cpu->memory = system_memory; |
763 | object_ref(OBJECT(cpu->memory)); | |
291135b5 | 764 | #endif |
39e329e3 LV |
765 | } |
766 | ||
ce5b1bbf | 767 | void cpu_exec_realizefn(CPUState *cpu, Error **errp) |
39e329e3 LV |
768 | { |
769 | CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu); | |
291135b5 | 770 | |
267f685b | 771 | cpu_list_add(cpu); |
1bc7e522 IM |
772 | |
773 | #ifndef CONFIG_USER_ONLY | |
e0d47944 | 774 | if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { |
741da0d3 | 775 | vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu); |
e0d47944 | 776 | } |
b170fce3 | 777 | if (cc->vmsd != NULL) { |
741da0d3 | 778 | vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu); |
b170fce3 | 779 | } |
741da0d3 | 780 | #endif |
ea041c0e FB |
781 | } |
782 | ||
406bc339 | 783 | #if defined(CONFIG_USER_ONLY) |
00b941e5 | 784 | static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) |
1e7855a5 | 785 | { |
406bc339 PK |
786 | mmap_lock(); |
787 | tb_lock(); | |
788 | tb_invalidate_phys_page_range(pc, pc + 1, 0); | |
789 | tb_unlock(); | |
790 | mmap_unlock(); | |
791 | } | |
792 | #else | |
793 | static void breakpoint_invalidate(CPUState *cpu, target_ulong pc) | |
794 | { | |
795 | MemTxAttrs attrs; | |
796 | hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs); | |
797 | int asidx = cpu_asidx_from_attrs(cpu, attrs); | |
798 | if (phys != -1) { | |
799 | /* Locks grabbed by tb_invalidate_phys_addr */ | |
800 | tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as, | |
801 | phys | (pc & ~TARGET_PAGE_MASK)); | |
802 | } | |
1e7855a5 | 803 | } |
406bc339 | 804 | #endif |
d720b93d | 805 | |
c527ee8f | 806 | #if defined(CONFIG_USER_ONLY) |
75a34036 | 807 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask) |
c527ee8f PB |
808 | |
809 | { | |
810 | } | |
811 | ||
3ee887e8 PM |
812 | int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, |
813 | int flags) | |
814 | { | |
815 | return -ENOSYS; | |
816 | } | |
817 | ||
818 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) | |
819 | { | |
820 | } | |
821 | ||
75a34036 | 822 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, |
c527ee8f PB |
823 | int flags, CPUWatchpoint **watchpoint) |
824 | { | |
825 | return -ENOSYS; | |
826 | } | |
827 | #else | |
6658ffb8 | 828 | /* Add a watchpoint. */ |
75a34036 | 829 | int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len, |
a1d1bb31 | 830 | int flags, CPUWatchpoint **watchpoint) |
6658ffb8 | 831 | { |
c0ce998e | 832 | CPUWatchpoint *wp; |
6658ffb8 | 833 | |
05068c0d | 834 | /* forbid ranges which are empty or run off the end of the address space */ |
07e2863d | 835 | if (len == 0 || (addr + len - 1) < addr) { |
75a34036 AF |
836 | error_report("tried to set invalid watchpoint at %" |
837 | VADDR_PRIx ", len=%" VADDR_PRIu, addr, len); | |
b4051334 AL |
838 | return -EINVAL; |
839 | } | |
7267c094 | 840 | wp = g_malloc(sizeof(*wp)); |
a1d1bb31 AL |
841 | |
842 | wp->vaddr = addr; | |
05068c0d | 843 | wp->len = len; |
a1d1bb31 AL |
844 | wp->flags = flags; |
845 | ||
2dc9f411 | 846 | /* keep all GDB-injected watchpoints in front */ |
ff4700b0 AF |
847 | if (flags & BP_GDB) { |
848 | QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry); | |
849 | } else { | |
850 | QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry); | |
851 | } | |
6658ffb8 | 852 | |
31b030d4 | 853 | tlb_flush_page(cpu, addr); |
a1d1bb31 AL |
854 | |
855 | if (watchpoint) | |
856 | *watchpoint = wp; | |
857 | return 0; | |
6658ffb8 PB |
858 | } |
859 | ||
a1d1bb31 | 860 | /* Remove a specific watchpoint. */ |
75a34036 | 861 | int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len, |
a1d1bb31 | 862 | int flags) |
6658ffb8 | 863 | { |
a1d1bb31 | 864 | CPUWatchpoint *wp; |
6658ffb8 | 865 | |
ff4700b0 | 866 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
05068c0d | 867 | if (addr == wp->vaddr && len == wp->len |
6e140f28 | 868 | && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) { |
75a34036 | 869 | cpu_watchpoint_remove_by_ref(cpu, wp); |
6658ffb8 PB |
870 | return 0; |
871 | } | |
872 | } | |
a1d1bb31 | 873 | return -ENOENT; |
6658ffb8 PB |
874 | } |
875 | ||
a1d1bb31 | 876 | /* Remove a specific watchpoint by reference. */ |
75a34036 | 877 | void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint) |
a1d1bb31 | 878 | { |
ff4700b0 | 879 | QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry); |
7d03f82f | 880 | |
31b030d4 | 881 | tlb_flush_page(cpu, watchpoint->vaddr); |
a1d1bb31 | 882 | |
7267c094 | 883 | g_free(watchpoint); |
a1d1bb31 AL |
884 | } |
885 | ||
886 | /* Remove all matching watchpoints. */ | |
75a34036 | 887 | void cpu_watchpoint_remove_all(CPUState *cpu, int mask) |
a1d1bb31 | 888 | { |
c0ce998e | 889 | CPUWatchpoint *wp, *next; |
a1d1bb31 | 890 | |
ff4700b0 | 891 | QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) { |
75a34036 AF |
892 | if (wp->flags & mask) { |
893 | cpu_watchpoint_remove_by_ref(cpu, wp); | |
894 | } | |
c0ce998e | 895 | } |
7d03f82f | 896 | } |
05068c0d PM |
897 | |
898 | /* Return true if this watchpoint address matches the specified | |
899 | * access (ie the address range covered by the watchpoint overlaps | |
900 | * partially or completely with the address range covered by the | |
901 | * access). | |
902 | */ | |
903 | static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp, | |
904 | vaddr addr, | |
905 | vaddr len) | |
906 | { | |
907 | /* We know the lengths are non-zero, but a little caution is | |
908 | * required to avoid errors in the case where the range ends | |
909 | * exactly at the top of the address space and so addr + len | |
910 | * wraps round to zero. | |
911 | */ | |
912 | vaddr wpend = wp->vaddr + wp->len - 1; | |
913 | vaddr addrend = addr + len - 1; | |
914 | ||
915 | return !(addr > wpend || wp->vaddr > addrend); | |
916 | } | |
917 | ||
c527ee8f | 918 | #endif |
7d03f82f | 919 | |
a1d1bb31 | 920 | /* Add a breakpoint. */ |
b3310ab3 | 921 | int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags, |
a1d1bb31 | 922 | CPUBreakpoint **breakpoint) |
4c3a88a2 | 923 | { |
c0ce998e | 924 | CPUBreakpoint *bp; |
3b46e624 | 925 | |
7267c094 | 926 | bp = g_malloc(sizeof(*bp)); |
4c3a88a2 | 927 | |
a1d1bb31 AL |
928 | bp->pc = pc; |
929 | bp->flags = flags; | |
930 | ||
2dc9f411 | 931 | /* keep all GDB-injected breakpoints in front */ |
00b941e5 | 932 | if (flags & BP_GDB) { |
f0c3c505 | 933 | QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry); |
00b941e5 | 934 | } else { |
f0c3c505 | 935 | QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry); |
00b941e5 | 936 | } |
3b46e624 | 937 | |
f0c3c505 | 938 | breakpoint_invalidate(cpu, pc); |
a1d1bb31 | 939 | |
00b941e5 | 940 | if (breakpoint) { |
a1d1bb31 | 941 | *breakpoint = bp; |
00b941e5 | 942 | } |
4c3a88a2 | 943 | return 0; |
4c3a88a2 FB |
944 | } |
945 | ||
a1d1bb31 | 946 | /* Remove a specific breakpoint. */ |
b3310ab3 | 947 | int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags) |
a1d1bb31 | 948 | { |
a1d1bb31 AL |
949 | CPUBreakpoint *bp; |
950 | ||
f0c3c505 | 951 | QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) { |
a1d1bb31 | 952 | if (bp->pc == pc && bp->flags == flags) { |
b3310ab3 | 953 | cpu_breakpoint_remove_by_ref(cpu, bp); |
a1d1bb31 AL |
954 | return 0; |
955 | } | |
7d03f82f | 956 | } |
a1d1bb31 | 957 | return -ENOENT; |
7d03f82f EI |
958 | } |
959 | ||
a1d1bb31 | 960 | /* Remove a specific breakpoint by reference. */ |
b3310ab3 | 961 | void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint) |
4c3a88a2 | 962 | { |
f0c3c505 AF |
963 | QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry); |
964 | ||
965 | breakpoint_invalidate(cpu, breakpoint->pc); | |
a1d1bb31 | 966 | |
7267c094 | 967 | g_free(breakpoint); |
a1d1bb31 AL |
968 | } |
969 | ||
970 | /* Remove all matching breakpoints. */ | |
b3310ab3 | 971 | void cpu_breakpoint_remove_all(CPUState *cpu, int mask) |
a1d1bb31 | 972 | { |
c0ce998e | 973 | CPUBreakpoint *bp, *next; |
a1d1bb31 | 974 | |
f0c3c505 | 975 | QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) { |
b3310ab3 AF |
976 | if (bp->flags & mask) { |
977 | cpu_breakpoint_remove_by_ref(cpu, bp); | |
978 | } | |
c0ce998e | 979 | } |
4c3a88a2 FB |
980 | } |
981 | ||
c33a346e FB |
982 | /* enable or disable single step mode. EXCP_DEBUG is returned by the |
983 | CPU loop after each instruction */ | |
3825b28f | 984 | void cpu_single_step(CPUState *cpu, int enabled) |
c33a346e | 985 | { |
ed2803da AF |
986 | if (cpu->singlestep_enabled != enabled) { |
987 | cpu->singlestep_enabled = enabled; | |
988 | if (kvm_enabled()) { | |
38e478ec | 989 | kvm_update_guest_debug(cpu, 0); |
ed2803da | 990 | } else { |
ccbb4d44 | 991 | /* must flush all the translated code to avoid inconsistencies */ |
e22a25c9 | 992 | /* XXX: only flush what is necessary */ |
bbd77c18 | 993 | tb_flush(cpu); |
e22a25c9 | 994 | } |
c33a346e | 995 | } |
c33a346e FB |
996 | } |
997 | ||
a47dddd7 | 998 | void cpu_abort(CPUState *cpu, const char *fmt, ...) |
7501267e FB |
999 | { |
1000 | va_list ap; | |
493ae1f0 | 1001 | va_list ap2; |
7501267e FB |
1002 | |
1003 | va_start(ap, fmt); | |
493ae1f0 | 1004 | va_copy(ap2, ap); |
7501267e FB |
1005 | fprintf(stderr, "qemu: fatal: "); |
1006 | vfprintf(stderr, fmt, ap); | |
1007 | fprintf(stderr, "\n"); | |
878096ee | 1008 | cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP); |
013a2942 | 1009 | if (qemu_log_separate()) { |
1ee73216 | 1010 | qemu_log_lock(); |
93fcfe39 AL |
1011 | qemu_log("qemu: fatal: "); |
1012 | qemu_log_vprintf(fmt, ap2); | |
1013 | qemu_log("\n"); | |
a0762859 | 1014 | log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP); |
31b1a7b4 | 1015 | qemu_log_flush(); |
1ee73216 | 1016 | qemu_log_unlock(); |
93fcfe39 | 1017 | qemu_log_close(); |
924edcae | 1018 | } |
493ae1f0 | 1019 | va_end(ap2); |
f9373291 | 1020 | va_end(ap); |
7615936e | 1021 | replay_finish(); |
fd052bf6 RV |
1022 | #if defined(CONFIG_USER_ONLY) |
1023 | { | |
1024 | struct sigaction act; | |
1025 | sigfillset(&act.sa_mask); | |
1026 | act.sa_handler = SIG_DFL; | |
1027 | sigaction(SIGABRT, &act, NULL); | |
1028 | } | |
1029 | #endif | |
7501267e FB |
1030 | abort(); |
1031 | } | |
1032 | ||
0124311e | 1033 | #if !defined(CONFIG_USER_ONLY) |
0dc3f44a | 1034 | /* Called from RCU critical section */ |
041603fe PB |
1035 | static RAMBlock *qemu_get_ram_block(ram_addr_t addr) |
1036 | { | |
1037 | RAMBlock *block; | |
1038 | ||
43771539 | 1039 | block = atomic_rcu_read(&ram_list.mru_block); |
9b8424d5 | 1040 | if (block && addr - block->offset < block->max_length) { |
68851b98 | 1041 | return block; |
041603fe | 1042 | } |
99e15582 | 1043 | RAMBLOCK_FOREACH(block) { |
9b8424d5 | 1044 | if (addr - block->offset < block->max_length) { |
041603fe PB |
1045 | goto found; |
1046 | } | |
1047 | } | |
1048 | ||
1049 | fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); | |
1050 | abort(); | |
1051 | ||
1052 | found: | |
43771539 PB |
1053 | /* It is safe to write mru_block outside the iothread lock. This |
1054 | * is what happens: | |
1055 | * | |
1056 | * mru_block = xxx | |
1057 | * rcu_read_unlock() | |
1058 | * xxx removed from list | |
1059 | * rcu_read_lock() | |
1060 | * read mru_block | |
1061 | * mru_block = NULL; | |
1062 | * call_rcu(reclaim_ramblock, xxx); | |
1063 | * rcu_read_unlock() | |
1064 | * | |
1065 | * atomic_rcu_set is not needed here. The block was already published | |
1066 | * when it was placed into the list. Here we're just making an extra | |
1067 | * copy of the pointer. | |
1068 | */ | |
041603fe PB |
1069 | ram_list.mru_block = block; |
1070 | return block; | |
1071 | } | |
1072 | ||
a2f4d5be | 1073 | static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) |
d24981d3 | 1074 | { |
9a13565d | 1075 | CPUState *cpu; |
041603fe | 1076 | ram_addr_t start1; |
a2f4d5be JQ |
1077 | RAMBlock *block; |
1078 | ram_addr_t end; | |
1079 | ||
1080 | end = TARGET_PAGE_ALIGN(start + length); | |
1081 | start &= TARGET_PAGE_MASK; | |
d24981d3 | 1082 | |
0dc3f44a | 1083 | rcu_read_lock(); |
041603fe PB |
1084 | block = qemu_get_ram_block(start); |
1085 | assert(block == qemu_get_ram_block(end - 1)); | |
1240be24 | 1086 | start1 = (uintptr_t)ramblock_ptr(block, start - block->offset); |
9a13565d PC |
1087 | CPU_FOREACH(cpu) { |
1088 | tlb_reset_dirty(cpu, start1, length); | |
1089 | } | |
0dc3f44a | 1090 | rcu_read_unlock(); |
d24981d3 JQ |
1091 | } |
1092 | ||
5579c7f3 | 1093 | /* Note: start and end must be within the same ram block. */ |
03eebc9e SH |
1094 | bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, |
1095 | ram_addr_t length, | |
1096 | unsigned client) | |
1ccde1cb | 1097 | { |
5b82b703 | 1098 | DirtyMemoryBlocks *blocks; |
03eebc9e | 1099 | unsigned long end, page; |
5b82b703 | 1100 | bool dirty = false; |
03eebc9e SH |
1101 | |
1102 | if (length == 0) { | |
1103 | return false; | |
1104 | } | |
f23db169 | 1105 | |
03eebc9e SH |
1106 | end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; |
1107 | page = start >> TARGET_PAGE_BITS; | |
5b82b703 SH |
1108 | |
1109 | rcu_read_lock(); | |
1110 | ||
1111 | blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | |
1112 | ||
1113 | while (page < end) { | |
1114 | unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; | |
1115 | unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; | |
1116 | unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset); | |
1117 | ||
1118 | dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx], | |
1119 | offset, num); | |
1120 | page += num; | |
1121 | } | |
1122 | ||
1123 | rcu_read_unlock(); | |
03eebc9e SH |
1124 | |
1125 | if (dirty && tcg_enabled()) { | |
a2f4d5be | 1126 | tlb_reset_dirty_range_all(start, length); |
5579c7f3 | 1127 | } |
03eebc9e SH |
1128 | |
1129 | return dirty; | |
1ccde1cb FB |
1130 | } |
1131 | ||
8deaf12c GH |
1132 | DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty |
1133 | (ram_addr_t start, ram_addr_t length, unsigned client) | |
1134 | { | |
1135 | DirtyMemoryBlocks *blocks; | |
1136 | unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL); | |
1137 | ram_addr_t first = QEMU_ALIGN_DOWN(start, align); | |
1138 | ram_addr_t last = QEMU_ALIGN_UP(start + length, align); | |
1139 | DirtyBitmapSnapshot *snap; | |
1140 | unsigned long page, end, dest; | |
1141 | ||
1142 | snap = g_malloc0(sizeof(*snap) + | |
1143 | ((last - first) >> (TARGET_PAGE_BITS + 3))); | |
1144 | snap->start = first; | |
1145 | snap->end = last; | |
1146 | ||
1147 | page = first >> TARGET_PAGE_BITS; | |
1148 | end = last >> TARGET_PAGE_BITS; | |
1149 | dest = 0; | |
1150 | ||
1151 | rcu_read_lock(); | |
1152 | ||
1153 | blocks = atomic_rcu_read(&ram_list.dirty_memory[client]); | |
1154 | ||
1155 | while (page < end) { | |
1156 | unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE; | |
1157 | unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE; | |
1158 | unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset); | |
1159 | ||
1160 | assert(QEMU_IS_ALIGNED(offset, (1 << BITS_PER_LEVEL))); | |
1161 | assert(QEMU_IS_ALIGNED(num, (1 << BITS_PER_LEVEL))); | |
1162 | offset >>= BITS_PER_LEVEL; | |
1163 | ||
1164 | bitmap_copy_and_clear_atomic(snap->dirty + dest, | |
1165 | blocks->blocks[idx] + offset, | |
1166 | num); | |
1167 | page += num; | |
1168 | dest += num >> BITS_PER_LEVEL; | |
1169 | } | |
1170 | ||
1171 | rcu_read_unlock(); | |
1172 | ||
1173 | if (tcg_enabled()) { | |
1174 | tlb_reset_dirty_range_all(start, length); | |
1175 | } | |
1176 | ||
1177 | return snap; | |
1178 | } | |
1179 | ||
1180 | bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, | |
1181 | ram_addr_t start, | |
1182 | ram_addr_t length) | |
1183 | { | |
1184 | unsigned long page, end; | |
1185 | ||
1186 | assert(start >= snap->start); | |
1187 | assert(start + length <= snap->end); | |
1188 | ||
1189 | end = TARGET_PAGE_ALIGN(start + length - snap->start) >> TARGET_PAGE_BITS; | |
1190 | page = (start - snap->start) >> TARGET_PAGE_BITS; | |
1191 | ||
1192 | while (page < end) { | |
1193 | if (test_bit(page, snap->dirty)) { | |
1194 | return true; | |
1195 | } | |
1196 | page++; | |
1197 | } | |
1198 | return false; | |
1199 | } | |
1200 | ||
79e2b9ae | 1201 | /* Called from RCU critical section */ |
bb0e627a | 1202 | hwaddr memory_region_section_get_iotlb(CPUState *cpu, |
149f54b5 PB |
1203 | MemoryRegionSection *section, |
1204 | target_ulong vaddr, | |
1205 | hwaddr paddr, hwaddr xlat, | |
1206 | int prot, | |
1207 | target_ulong *address) | |
e5548617 | 1208 | { |
a8170e5e | 1209 | hwaddr iotlb; |
e5548617 BS |
1210 | CPUWatchpoint *wp; |
1211 | ||
cc5bea60 | 1212 | if (memory_region_is_ram(section->mr)) { |
e5548617 | 1213 | /* Normal RAM. */ |
e4e69794 | 1214 | iotlb = memory_region_get_ram_addr(section->mr) + xlat; |
e5548617 | 1215 | if (!section->readonly) { |
b41aac4f | 1216 | iotlb |= PHYS_SECTION_NOTDIRTY; |
e5548617 | 1217 | } else { |
b41aac4f | 1218 | iotlb |= PHYS_SECTION_ROM; |
e5548617 BS |
1219 | } |
1220 | } else { | |
0b8e2c10 PM |
1221 | AddressSpaceDispatch *d; |
1222 | ||
1223 | d = atomic_rcu_read(§ion->address_space->dispatch); | |
1224 | iotlb = section - d->map.sections; | |
149f54b5 | 1225 | iotlb += xlat; |
e5548617 BS |
1226 | } |
1227 | ||
1228 | /* Make accesses to pages with watchpoints go via the | |
1229 | watchpoint trap routines. */ | |
ff4700b0 | 1230 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
05068c0d | 1231 | if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) { |
e5548617 BS |
1232 | /* Avoid trapping reads of pages with a write breakpoint. */ |
1233 | if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { | |
b41aac4f | 1234 | iotlb = PHYS_SECTION_WATCH + paddr; |
e5548617 BS |
1235 | *address |= TLB_MMIO; |
1236 | break; | |
1237 | } | |
1238 | } | |
1239 | } | |
1240 | ||
1241 | return iotlb; | |
1242 | } | |
9fa3e853 FB |
1243 | #endif /* defined(CONFIG_USER_ONLY) */ |
1244 | ||
e2eef170 | 1245 | #if !defined(CONFIG_USER_ONLY) |
8da3ff18 | 1246 | |
c227f099 | 1247 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 1248 | uint16_t section); |
acc9d80b | 1249 | static subpage_t *subpage_init(AddressSpace *as, hwaddr base); |
54688b1e | 1250 | |
a2b257d6 IM |
1251 | static void *(*phys_mem_alloc)(size_t size, uint64_t *align) = |
1252 | qemu_anon_ram_alloc; | |
91138037 MA |
1253 | |
1254 | /* | |
1255 | * Set a custom physical guest memory alloator. | |
1256 | * Accelerators with unusual needs may need this. Hopefully, we can | |
1257 | * get rid of it eventually. | |
1258 | */ | |
a2b257d6 | 1259 | void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align)) |
91138037 MA |
1260 | { |
1261 | phys_mem_alloc = alloc; | |
1262 | } | |
1263 | ||
53cb28cb MA |
1264 | static uint16_t phys_section_add(PhysPageMap *map, |
1265 | MemoryRegionSection *section) | |
5312bd8b | 1266 | { |
68f3f65b PB |
1267 | /* The physical section number is ORed with a page-aligned |
1268 | * pointer to produce the iotlb entries. Thus it should | |
1269 | * never overflow into the page-aligned value. | |
1270 | */ | |
53cb28cb | 1271 | assert(map->sections_nb < TARGET_PAGE_SIZE); |
68f3f65b | 1272 | |
53cb28cb MA |
1273 | if (map->sections_nb == map->sections_nb_alloc) { |
1274 | map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16); | |
1275 | map->sections = g_renew(MemoryRegionSection, map->sections, | |
1276 | map->sections_nb_alloc); | |
5312bd8b | 1277 | } |
53cb28cb | 1278 | map->sections[map->sections_nb] = *section; |
dfde4e6e | 1279 | memory_region_ref(section->mr); |
53cb28cb | 1280 | return map->sections_nb++; |
5312bd8b AK |
1281 | } |
1282 | ||
058bc4b5 PB |
1283 | static void phys_section_destroy(MemoryRegion *mr) |
1284 | { | |
55b4e80b DS |
1285 | bool have_sub_page = mr->subpage; |
1286 | ||
dfde4e6e PB |
1287 | memory_region_unref(mr); |
1288 | ||
55b4e80b | 1289 | if (have_sub_page) { |
058bc4b5 | 1290 | subpage_t *subpage = container_of(mr, subpage_t, iomem); |
b4fefef9 | 1291 | object_unref(OBJECT(&subpage->iomem)); |
058bc4b5 PB |
1292 | g_free(subpage); |
1293 | } | |
1294 | } | |
1295 | ||
6092666e | 1296 | static void phys_sections_free(PhysPageMap *map) |
5312bd8b | 1297 | { |
9affd6fc PB |
1298 | while (map->sections_nb > 0) { |
1299 | MemoryRegionSection *section = &map->sections[--map->sections_nb]; | |
058bc4b5 PB |
1300 | phys_section_destroy(section->mr); |
1301 | } | |
9affd6fc PB |
1302 | g_free(map->sections); |
1303 | g_free(map->nodes); | |
5312bd8b AK |
1304 | } |
1305 | ||
ac1970fb | 1306 | static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section) |
0f0cb164 AK |
1307 | { |
1308 | subpage_t *subpage; | |
a8170e5e | 1309 | hwaddr base = section->offset_within_address_space |
0f0cb164 | 1310 | & TARGET_PAGE_MASK; |
003a0cf2 | 1311 | MemoryRegionSection *existing = phys_page_find(d, base); |
0f0cb164 AK |
1312 | MemoryRegionSection subsection = { |
1313 | .offset_within_address_space = base, | |
052e87b0 | 1314 | .size = int128_make64(TARGET_PAGE_SIZE), |
0f0cb164 | 1315 | }; |
a8170e5e | 1316 | hwaddr start, end; |
0f0cb164 | 1317 | |
f3705d53 | 1318 | assert(existing->mr->subpage || existing->mr == &io_mem_unassigned); |
0f0cb164 | 1319 | |
f3705d53 | 1320 | if (!(existing->mr->subpage)) { |
acc9d80b | 1321 | subpage = subpage_init(d->as, base); |
3be91e86 | 1322 | subsection.address_space = d->as; |
0f0cb164 | 1323 | subsection.mr = &subpage->iomem; |
ac1970fb | 1324 | phys_page_set(d, base >> TARGET_PAGE_BITS, 1, |
53cb28cb | 1325 | phys_section_add(&d->map, &subsection)); |
0f0cb164 | 1326 | } else { |
f3705d53 | 1327 | subpage = container_of(existing->mr, subpage_t, iomem); |
0f0cb164 AK |
1328 | } |
1329 | start = section->offset_within_address_space & ~TARGET_PAGE_MASK; | |
052e87b0 | 1330 | end = start + int128_get64(section->size) - 1; |
53cb28cb MA |
1331 | subpage_register(subpage, start, end, |
1332 | phys_section_add(&d->map, section)); | |
0f0cb164 AK |
1333 | } |
1334 | ||
1335 | ||
052e87b0 PB |
1336 | static void register_multipage(AddressSpaceDispatch *d, |
1337 | MemoryRegionSection *section) | |
33417e70 | 1338 | { |
a8170e5e | 1339 | hwaddr start_addr = section->offset_within_address_space; |
53cb28cb | 1340 | uint16_t section_index = phys_section_add(&d->map, section); |
052e87b0 PB |
1341 | uint64_t num_pages = int128_get64(int128_rshift(section->size, |
1342 | TARGET_PAGE_BITS)); | |
dd81124b | 1343 | |
733d5ef5 PB |
1344 | assert(num_pages); |
1345 | phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index); | |
33417e70 FB |
1346 | } |
1347 | ||
ac1970fb | 1348 | static void mem_add(MemoryListener *listener, MemoryRegionSection *section) |
0f0cb164 | 1349 | { |
89ae337a | 1350 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); |
00752703 | 1351 | AddressSpaceDispatch *d = as->next_dispatch; |
99b9cc06 | 1352 | MemoryRegionSection now = *section, remain = *section; |
052e87b0 | 1353 | Int128 page_size = int128_make64(TARGET_PAGE_SIZE); |
0f0cb164 | 1354 | |
733d5ef5 PB |
1355 | if (now.offset_within_address_space & ~TARGET_PAGE_MASK) { |
1356 | uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space) | |
1357 | - now.offset_within_address_space; | |
1358 | ||
052e87b0 | 1359 | now.size = int128_min(int128_make64(left), now.size); |
ac1970fb | 1360 | register_subpage(d, &now); |
733d5ef5 | 1361 | } else { |
052e87b0 | 1362 | now.size = int128_zero(); |
733d5ef5 | 1363 | } |
052e87b0 PB |
1364 | while (int128_ne(remain.size, now.size)) { |
1365 | remain.size = int128_sub(remain.size, now.size); | |
1366 | remain.offset_within_address_space += int128_get64(now.size); | |
1367 | remain.offset_within_region += int128_get64(now.size); | |
69b67646 | 1368 | now = remain; |
052e87b0 | 1369 | if (int128_lt(remain.size, page_size)) { |
733d5ef5 | 1370 | register_subpage(d, &now); |
88266249 | 1371 | } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) { |
052e87b0 | 1372 | now.size = page_size; |
ac1970fb | 1373 | register_subpage(d, &now); |
69b67646 | 1374 | } else { |
052e87b0 | 1375 | now.size = int128_and(now.size, int128_neg(page_size)); |
ac1970fb | 1376 | register_multipage(d, &now); |
69b67646 | 1377 | } |
0f0cb164 AK |
1378 | } |
1379 | } | |
1380 | ||
62a2744c SY |
1381 | void qemu_flush_coalesced_mmio_buffer(void) |
1382 | { | |
1383 | if (kvm_enabled()) | |
1384 | kvm_flush_coalesced_mmio_buffer(); | |
1385 | } | |
1386 | ||
b2a8658e UD |
1387 | void qemu_mutex_lock_ramlist(void) |
1388 | { | |
1389 | qemu_mutex_lock(&ram_list.mutex); | |
1390 | } | |
1391 | ||
1392 | void qemu_mutex_unlock_ramlist(void) | |
1393 | { | |
1394 | qemu_mutex_unlock(&ram_list.mutex); | |
1395 | } | |
1396 | ||
be9b23c4 PX |
1397 | void ram_block_dump(Monitor *mon) |
1398 | { | |
1399 | RAMBlock *block; | |
1400 | char *psize; | |
1401 | ||
1402 | rcu_read_lock(); | |
1403 | monitor_printf(mon, "%24s %8s %18s %18s %18s\n", | |
1404 | "Block Name", "PSize", "Offset", "Used", "Total"); | |
1405 | RAMBLOCK_FOREACH(block) { | |
1406 | psize = size_to_str(block->page_size); | |
1407 | monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 | |
1408 | " 0x%016" PRIx64 "\n", block->idstr, psize, | |
1409 | (uint64_t)block->offset, | |
1410 | (uint64_t)block->used_length, | |
1411 | (uint64_t)block->max_length); | |
1412 | g_free(psize); | |
1413 | } | |
1414 | rcu_read_unlock(); | |
1415 | } | |
1416 | ||
9c607668 AK |
1417 | #ifdef __linux__ |
1418 | /* | |
1419 | * FIXME TOCTTOU: this iterates over memory backends' mem-path, which | |
1420 | * may or may not name the same files / on the same filesystem now as | |
1421 | * when we actually open and map them. Iterate over the file | |
1422 | * descriptors instead, and use qemu_fd_getpagesize(). | |
1423 | */ | |
1424 | static int find_max_supported_pagesize(Object *obj, void *opaque) | |
1425 | { | |
1426 | char *mem_path; | |
1427 | long *hpsize_min = opaque; | |
1428 | ||
1429 | if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) { | |
1430 | mem_path = object_property_get_str(obj, "mem-path", NULL); | |
1431 | if (mem_path) { | |
1432 | long hpsize = qemu_mempath_getpagesize(mem_path); | |
1433 | if (hpsize < *hpsize_min) { | |
1434 | *hpsize_min = hpsize; | |
1435 | } | |
1436 | } else { | |
1437 | *hpsize_min = getpagesize(); | |
1438 | } | |
1439 | } | |
1440 | ||
1441 | return 0; | |
1442 | } | |
1443 | ||
1444 | long qemu_getrampagesize(void) | |
1445 | { | |
1446 | long hpsize = LONG_MAX; | |
1447 | long mainrampagesize; | |
1448 | Object *memdev_root; | |
1449 | ||
1450 | if (mem_path) { | |
1451 | mainrampagesize = qemu_mempath_getpagesize(mem_path); | |
1452 | } else { | |
1453 | mainrampagesize = getpagesize(); | |
1454 | } | |
1455 | ||
1456 | /* it's possible we have memory-backend objects with | |
1457 | * hugepage-backed RAM. these may get mapped into system | |
1458 | * address space via -numa parameters or memory hotplug | |
1459 | * hooks. we want to take these into account, but we | |
1460 | * also want to make sure these supported hugepage | |
1461 | * sizes are applicable across the entire range of memory | |
1462 | * we may boot from, so we take the min across all | |
1463 | * backends, and assume normal pages in cases where a | |
1464 | * backend isn't backed by hugepages. | |
1465 | */ | |
1466 | memdev_root = object_resolve_path("/objects", NULL); | |
1467 | if (memdev_root) { | |
1468 | object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize); | |
1469 | } | |
1470 | if (hpsize == LONG_MAX) { | |
1471 | /* No additional memory regions found ==> Report main RAM page size */ | |
1472 | return mainrampagesize; | |
1473 | } | |
1474 | ||
1475 | /* If NUMA is disabled or the NUMA nodes are not backed with a | |
1476 | * memory-backend, then there is at least one node using "normal" RAM, | |
1477 | * so if its page size is smaller we have got to report that size instead. | |
1478 | */ | |
1479 | if (hpsize > mainrampagesize && | |
1480 | (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) { | |
1481 | static bool warned; | |
1482 | if (!warned) { | |
1483 | error_report("Huge page support disabled (n/a for main memory)."); | |
1484 | warned = true; | |
1485 | } | |
1486 | return mainrampagesize; | |
1487 | } | |
1488 | ||
1489 | return hpsize; | |
1490 | } | |
1491 | #else | |
1492 | long qemu_getrampagesize(void) | |
1493 | { | |
1494 | return getpagesize(); | |
1495 | } | |
1496 | #endif | |
1497 | ||
e1e84ba0 | 1498 | #ifdef __linux__ |
d6af99c9 HZ |
1499 | static int64_t get_file_size(int fd) |
1500 | { | |
1501 | int64_t size = lseek(fd, 0, SEEK_END); | |
1502 | if (size < 0) { | |
1503 | return -errno; | |
1504 | } | |
1505 | return size; | |
1506 | } | |
1507 | ||
8d37b030 MAL |
1508 | static int file_ram_open(const char *path, |
1509 | const char *region_name, | |
1510 | bool *created, | |
1511 | Error **errp) | |
c902760f MT |
1512 | { |
1513 | char *filename; | |
8ca761f6 PF |
1514 | char *sanitized_name; |
1515 | char *c; | |
5c3ece79 | 1516 | int fd = -1; |
c902760f | 1517 | |
8d37b030 | 1518 | *created = false; |
fd97fd44 MA |
1519 | for (;;) { |
1520 | fd = open(path, O_RDWR); | |
1521 | if (fd >= 0) { | |
1522 | /* @path names an existing file, use it */ | |
1523 | break; | |
8d31d6b6 | 1524 | } |
fd97fd44 MA |
1525 | if (errno == ENOENT) { |
1526 | /* @path names a file that doesn't exist, create it */ | |
1527 | fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); | |
1528 | if (fd >= 0) { | |
8d37b030 | 1529 | *created = true; |
fd97fd44 MA |
1530 | break; |
1531 | } | |
1532 | } else if (errno == EISDIR) { | |
1533 | /* @path names a directory, create a file there */ | |
1534 | /* Make name safe to use with mkstemp by replacing '/' with '_'. */ | |
8d37b030 | 1535 | sanitized_name = g_strdup(region_name); |
fd97fd44 MA |
1536 | for (c = sanitized_name; *c != '\0'; c++) { |
1537 | if (*c == '/') { | |
1538 | *c = '_'; | |
1539 | } | |
1540 | } | |
8ca761f6 | 1541 | |
fd97fd44 MA |
1542 | filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, |
1543 | sanitized_name); | |
1544 | g_free(sanitized_name); | |
8d31d6b6 | 1545 | |
fd97fd44 MA |
1546 | fd = mkstemp(filename); |
1547 | if (fd >= 0) { | |
1548 | unlink(filename); | |
1549 | g_free(filename); | |
1550 | break; | |
1551 | } | |
1552 | g_free(filename); | |
8d31d6b6 | 1553 | } |
fd97fd44 MA |
1554 | if (errno != EEXIST && errno != EINTR) { |
1555 | error_setg_errno(errp, errno, | |
1556 | "can't open backing store %s for guest RAM", | |
1557 | path); | |
8d37b030 | 1558 | return -1; |
fd97fd44 MA |
1559 | } |
1560 | /* | |
1561 | * Try again on EINTR and EEXIST. The latter happens when | |
1562 | * something else creates the file between our two open(). | |
1563 | */ | |
8d31d6b6 | 1564 | } |
c902760f | 1565 | |
8d37b030 MAL |
1566 | return fd; |
1567 | } | |
1568 | ||
1569 | static void *file_ram_alloc(RAMBlock *block, | |
1570 | ram_addr_t memory, | |
1571 | int fd, | |
1572 | bool truncate, | |
1573 | Error **errp) | |
1574 | { | |
1575 | void *area; | |
1576 | ||
863e9621 | 1577 | block->page_size = qemu_fd_getpagesize(fd); |
8360668e HZ |
1578 | block->mr->align = block->page_size; |
1579 | #if defined(__s390x__) | |
1580 | if (kvm_enabled()) { | |
1581 | block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN); | |
1582 | } | |
1583 | #endif | |
fd97fd44 | 1584 | |
863e9621 | 1585 | if (memory < block->page_size) { |
fd97fd44 | 1586 | error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to " |
863e9621 DDAG |
1587 | "or larger than page size 0x%zx", |
1588 | memory, block->page_size); | |
8d37b030 | 1589 | return NULL; |
1775f111 HZ |
1590 | } |
1591 | ||
863e9621 | 1592 | memory = ROUND_UP(memory, block->page_size); |
c902760f MT |
1593 | |
1594 | /* | |
1595 | * ftruncate is not supported by hugetlbfs in older | |
1596 | * hosts, so don't bother bailing out on errors. | |
1597 | * If anything goes wrong with it under other filesystems, | |
1598 | * mmap will fail. | |
d6af99c9 HZ |
1599 | * |
1600 | * Do not truncate the non-empty backend file to avoid corrupting | |
1601 | * the existing data in the file. Disabling shrinking is not | |
1602 | * enough. For example, the current vNVDIMM implementation stores | |
1603 | * the guest NVDIMM labels at the end of the backend file. If the | |
1604 | * backend file is later extended, QEMU will not be able to find | |
1605 | * those labels. Therefore, extending the non-empty backend file | |
1606 | * is disabled as well. | |
c902760f | 1607 | */ |
8d37b030 | 1608 | if (truncate && ftruncate(fd, memory)) { |
9742bf26 | 1609 | perror("ftruncate"); |
7f56e740 | 1610 | } |
c902760f | 1611 | |
d2f39add DD |
1612 | area = qemu_ram_mmap(fd, memory, block->mr->align, |
1613 | block->flags & RAM_SHARED); | |
c902760f | 1614 | if (area == MAP_FAILED) { |
7f56e740 | 1615 | error_setg_errno(errp, errno, |
fd97fd44 | 1616 | "unable to map backing store for guest RAM"); |
8d37b030 | 1617 | return NULL; |
c902760f | 1618 | } |
ef36fa14 MT |
1619 | |
1620 | if (mem_prealloc) { | |
1e356fc1 | 1621 | os_mem_prealloc(fd, area, memory, smp_cpus, errp); |
056b68af | 1622 | if (errp && *errp) { |
8d37b030 MAL |
1623 | qemu_ram_munmap(area, memory); |
1624 | return NULL; | |
056b68af | 1625 | } |
ef36fa14 MT |
1626 | } |
1627 | ||
04b16653 | 1628 | block->fd = fd; |
c902760f MT |
1629 | return area; |
1630 | } | |
1631 | #endif | |
1632 | ||
0dc3f44a | 1633 | /* Called with the ramlist lock held. */ |
d17b5288 | 1634 | static ram_addr_t find_ram_offset(ram_addr_t size) |
04b16653 AW |
1635 | { |
1636 | RAMBlock *block, *next_block; | |
3e837b2c | 1637 | ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; |
04b16653 | 1638 | |
49cd9ac6 SH |
1639 | assert(size != 0); /* it would hand out same offset multiple times */ |
1640 | ||
0dc3f44a | 1641 | if (QLIST_EMPTY_RCU(&ram_list.blocks)) { |
04b16653 | 1642 | return 0; |
0d53d9fe | 1643 | } |
04b16653 | 1644 | |
99e15582 | 1645 | RAMBLOCK_FOREACH(block) { |
f15fbc4b | 1646 | ram_addr_t end, next = RAM_ADDR_MAX; |
04b16653 | 1647 | |
62be4e3a | 1648 | end = block->offset + block->max_length; |
04b16653 | 1649 | |
99e15582 | 1650 | RAMBLOCK_FOREACH(next_block) { |
04b16653 AW |
1651 | if (next_block->offset >= end) { |
1652 | next = MIN(next, next_block->offset); | |
1653 | } | |
1654 | } | |
1655 | if (next - end >= size && next - end < mingap) { | |
3e837b2c | 1656 | offset = end; |
04b16653 AW |
1657 | mingap = next - end; |
1658 | } | |
1659 | } | |
3e837b2c AW |
1660 | |
1661 | if (offset == RAM_ADDR_MAX) { | |
1662 | fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n", | |
1663 | (uint64_t)size); | |
1664 | abort(); | |
1665 | } | |
1666 | ||
04b16653 AW |
1667 | return offset; |
1668 | } | |
1669 | ||
b8c48993 | 1670 | unsigned long last_ram_page(void) |
d17b5288 AW |
1671 | { |
1672 | RAMBlock *block; | |
1673 | ram_addr_t last = 0; | |
1674 | ||
0dc3f44a | 1675 | rcu_read_lock(); |
99e15582 | 1676 | RAMBLOCK_FOREACH(block) { |
62be4e3a | 1677 | last = MAX(last, block->offset + block->max_length); |
0d53d9fe | 1678 | } |
0dc3f44a | 1679 | rcu_read_unlock(); |
b8c48993 | 1680 | return last >> TARGET_PAGE_BITS; |
d17b5288 AW |
1681 | } |
1682 | ||
ddb97f1d JB |
1683 | static void qemu_ram_setup_dump(void *addr, ram_addr_t size) |
1684 | { | |
1685 | int ret; | |
ddb97f1d JB |
1686 | |
1687 | /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */ | |
47c8ca53 | 1688 | if (!machine_dump_guest_core(current_machine)) { |
ddb97f1d JB |
1689 | ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP); |
1690 | if (ret) { | |
1691 | perror("qemu_madvise"); | |
1692 | fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, " | |
1693 | "but dump_guest_core=off specified\n"); | |
1694 | } | |
1695 | } | |
1696 | } | |
1697 | ||
422148d3 DDAG |
1698 | const char *qemu_ram_get_idstr(RAMBlock *rb) |
1699 | { | |
1700 | return rb->idstr; | |
1701 | } | |
1702 | ||
463a4ac2 DDAG |
1703 | bool qemu_ram_is_shared(RAMBlock *rb) |
1704 | { | |
1705 | return rb->flags & RAM_SHARED; | |
1706 | } | |
1707 | ||
ae3a7047 | 1708 | /* Called with iothread lock held. */ |
fa53a0e5 | 1709 | void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) |
20cfe881 | 1710 | { |
fa53a0e5 | 1711 | RAMBlock *block; |
20cfe881 | 1712 | |
c5705a77 AK |
1713 | assert(new_block); |
1714 | assert(!new_block->idstr[0]); | |
84b89d78 | 1715 | |
09e5ab63 AL |
1716 | if (dev) { |
1717 | char *id = qdev_get_dev_path(dev); | |
84b89d78 CM |
1718 | if (id) { |
1719 | snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); | |
7267c094 | 1720 | g_free(id); |
84b89d78 CM |
1721 | } |
1722 | } | |
1723 | pstrcat(new_block->idstr, sizeof(new_block->idstr), name); | |
1724 | ||
ab0a9956 | 1725 | rcu_read_lock(); |
99e15582 | 1726 | RAMBLOCK_FOREACH(block) { |
fa53a0e5 GA |
1727 | if (block != new_block && |
1728 | !strcmp(block->idstr, new_block->idstr)) { | |
84b89d78 CM |
1729 | fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", |
1730 | new_block->idstr); | |
1731 | abort(); | |
1732 | } | |
1733 | } | |
0dc3f44a | 1734 | rcu_read_unlock(); |
c5705a77 AK |
1735 | } |
1736 | ||
ae3a7047 | 1737 | /* Called with iothread lock held. */ |
fa53a0e5 | 1738 | void qemu_ram_unset_idstr(RAMBlock *block) |
20cfe881 | 1739 | { |
ae3a7047 MD |
1740 | /* FIXME: arch_init.c assumes that this is not called throughout |
1741 | * migration. Ignore the problem since hot-unplug during migration | |
1742 | * does not work anyway. | |
1743 | */ | |
20cfe881 HT |
1744 | if (block) { |
1745 | memset(block->idstr, 0, sizeof(block->idstr)); | |
1746 | } | |
1747 | } | |
1748 | ||
863e9621 DDAG |
1749 | size_t qemu_ram_pagesize(RAMBlock *rb) |
1750 | { | |
1751 | return rb->page_size; | |
1752 | } | |
1753 | ||
67f11b5c DDAG |
1754 | /* Returns the largest size of page in use */ |
1755 | size_t qemu_ram_pagesize_largest(void) | |
1756 | { | |
1757 | RAMBlock *block; | |
1758 | size_t largest = 0; | |
1759 | ||
99e15582 | 1760 | RAMBLOCK_FOREACH(block) { |
67f11b5c DDAG |
1761 | largest = MAX(largest, qemu_ram_pagesize(block)); |
1762 | } | |
1763 | ||
1764 | return largest; | |
1765 | } | |
1766 | ||
8490fc78 LC |
1767 | static int memory_try_enable_merging(void *addr, size_t len) |
1768 | { | |
75cc7f01 | 1769 | if (!machine_mem_merge(current_machine)) { |
8490fc78 LC |
1770 | /* disabled by the user */ |
1771 | return 0; | |
1772 | } | |
1773 | ||
1774 | return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE); | |
1775 | } | |
1776 | ||
62be4e3a MT |
1777 | /* Only legal before guest might have detected the memory size: e.g. on |
1778 | * incoming migration, or right after reset. | |
1779 | * | |
1780 | * As memory core doesn't know how is memory accessed, it is up to | |
1781 | * resize callback to update device state and/or add assertions to detect | |
1782 | * misuse, if necessary. | |
1783 | */ | |
fa53a0e5 | 1784 | int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) |
62be4e3a | 1785 | { |
62be4e3a MT |
1786 | assert(block); |
1787 | ||
4ed023ce | 1788 | newsize = HOST_PAGE_ALIGN(newsize); |
129ddaf3 | 1789 | |
62be4e3a MT |
1790 | if (block->used_length == newsize) { |
1791 | return 0; | |
1792 | } | |
1793 | ||
1794 | if (!(block->flags & RAM_RESIZEABLE)) { | |
1795 | error_setg_errno(errp, EINVAL, | |
1796 | "Length mismatch: %s: 0x" RAM_ADDR_FMT | |
1797 | " in != 0x" RAM_ADDR_FMT, block->idstr, | |
1798 | newsize, block->used_length); | |
1799 | return -EINVAL; | |
1800 | } | |
1801 | ||
1802 | if (block->max_length < newsize) { | |
1803 | error_setg_errno(errp, EINVAL, | |
1804 | "Length too large: %s: 0x" RAM_ADDR_FMT | |
1805 | " > 0x" RAM_ADDR_FMT, block->idstr, | |
1806 | newsize, block->max_length); | |
1807 | return -EINVAL; | |
1808 | } | |
1809 | ||
1810 | cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); | |
1811 | block->used_length = newsize; | |
58d2707e PB |
1812 | cpu_physical_memory_set_dirty_range(block->offset, block->used_length, |
1813 | DIRTY_CLIENTS_ALL); | |
62be4e3a MT |
1814 | memory_region_set_size(block->mr, newsize); |
1815 | if (block->resized) { | |
1816 | block->resized(block->idstr, newsize, block->host); | |
1817 | } | |
1818 | return 0; | |
1819 | } | |
1820 | ||
5b82b703 SH |
1821 | /* Called with ram_list.mutex held */ |
1822 | static void dirty_memory_extend(ram_addr_t old_ram_size, | |
1823 | ram_addr_t new_ram_size) | |
1824 | { | |
1825 | ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size, | |
1826 | DIRTY_MEMORY_BLOCK_SIZE); | |
1827 | ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size, | |
1828 | DIRTY_MEMORY_BLOCK_SIZE); | |
1829 | int i; | |
1830 | ||
1831 | /* Only need to extend if block count increased */ | |
1832 | if (new_num_blocks <= old_num_blocks) { | |
1833 | return; | |
1834 | } | |
1835 | ||
1836 | for (i = 0; i < DIRTY_MEMORY_NUM; i++) { | |
1837 | DirtyMemoryBlocks *old_blocks; | |
1838 | DirtyMemoryBlocks *new_blocks; | |
1839 | int j; | |
1840 | ||
1841 | old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]); | |
1842 | new_blocks = g_malloc(sizeof(*new_blocks) + | |
1843 | sizeof(new_blocks->blocks[0]) * new_num_blocks); | |
1844 | ||
1845 | if (old_num_blocks) { | |
1846 | memcpy(new_blocks->blocks, old_blocks->blocks, | |
1847 | old_num_blocks * sizeof(old_blocks->blocks[0])); | |
1848 | } | |
1849 | ||
1850 | for (j = old_num_blocks; j < new_num_blocks; j++) { | |
1851 | new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE); | |
1852 | } | |
1853 | ||
1854 | atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks); | |
1855 | ||
1856 | if (old_blocks) { | |
1857 | g_free_rcu(old_blocks, rcu); | |
1858 | } | |
1859 | } | |
1860 | } | |
1861 | ||
528f46af | 1862 | static void ram_block_add(RAMBlock *new_block, Error **errp) |
c5705a77 | 1863 | { |
e1c57ab8 | 1864 | RAMBlock *block; |
0d53d9fe | 1865 | RAMBlock *last_block = NULL; |
2152f5ca | 1866 | ram_addr_t old_ram_size, new_ram_size; |
37aa7a0e | 1867 | Error *err = NULL; |
2152f5ca | 1868 | |
b8c48993 | 1869 | old_ram_size = last_ram_page(); |
c5705a77 | 1870 | |
b2a8658e | 1871 | qemu_mutex_lock_ramlist(); |
9b8424d5 | 1872 | new_block->offset = find_ram_offset(new_block->max_length); |
e1c57ab8 PB |
1873 | |
1874 | if (!new_block->host) { | |
1875 | if (xen_enabled()) { | |
9b8424d5 | 1876 | xen_ram_alloc(new_block->offset, new_block->max_length, |
37aa7a0e MA |
1877 | new_block->mr, &err); |
1878 | if (err) { | |
1879 | error_propagate(errp, err); | |
1880 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1881 | return; |
37aa7a0e | 1882 | } |
e1c57ab8 | 1883 | } else { |
9b8424d5 | 1884 | new_block->host = phys_mem_alloc(new_block->max_length, |
a2b257d6 | 1885 | &new_block->mr->align); |
39228250 | 1886 | if (!new_block->host) { |
ef701d7b HT |
1887 | error_setg_errno(errp, errno, |
1888 | "cannot set up guest memory '%s'", | |
1889 | memory_region_name(new_block->mr)); | |
1890 | qemu_mutex_unlock_ramlist(); | |
39c350ee | 1891 | return; |
39228250 | 1892 | } |
9b8424d5 | 1893 | memory_try_enable_merging(new_block->host, new_block->max_length); |
6977dfe6 | 1894 | } |
c902760f | 1895 | } |
94a6b54f | 1896 | |
dd631697 LZ |
1897 | new_ram_size = MAX(old_ram_size, |
1898 | (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); | |
1899 | if (new_ram_size > old_ram_size) { | |
5b82b703 | 1900 | dirty_memory_extend(old_ram_size, new_ram_size); |
dd631697 | 1901 | } |
0d53d9fe MD |
1902 | /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ, |
1903 | * QLIST (which has an RCU-friendly variant) does not have insertion at | |
1904 | * tail, so save the last element in last_block. | |
1905 | */ | |
99e15582 | 1906 | RAMBLOCK_FOREACH(block) { |
0d53d9fe | 1907 | last_block = block; |
9b8424d5 | 1908 | if (block->max_length < new_block->max_length) { |
abb26d63 PB |
1909 | break; |
1910 | } | |
1911 | } | |
1912 | if (block) { | |
0dc3f44a | 1913 | QLIST_INSERT_BEFORE_RCU(block, new_block, next); |
0d53d9fe | 1914 | } else if (last_block) { |
0dc3f44a | 1915 | QLIST_INSERT_AFTER_RCU(last_block, new_block, next); |
0d53d9fe | 1916 | } else { /* list is empty */ |
0dc3f44a | 1917 | QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next); |
abb26d63 | 1918 | } |
0d6d3c87 | 1919 | ram_list.mru_block = NULL; |
94a6b54f | 1920 | |
0dc3f44a MD |
1921 | /* Write list before version */ |
1922 | smp_wmb(); | |
f798b07f | 1923 | ram_list.version++; |
b2a8658e | 1924 | qemu_mutex_unlock_ramlist(); |
f798b07f | 1925 | |
9b8424d5 | 1926 | cpu_physical_memory_set_dirty_range(new_block->offset, |
58d2707e PB |
1927 | new_block->used_length, |
1928 | DIRTY_CLIENTS_ALL); | |
94a6b54f | 1929 | |
a904c911 PB |
1930 | if (new_block->host) { |
1931 | qemu_ram_setup_dump(new_block->host, new_block->max_length); | |
1932 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE); | |
c2cd627d | 1933 | /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */ |
a904c911 | 1934 | qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK); |
0987d735 | 1935 | ram_block_notify_add(new_block->host, new_block->max_length); |
e1c57ab8 | 1936 | } |
94a6b54f | 1937 | } |
e9a1ab19 | 1938 | |
0b183fc8 | 1939 | #ifdef __linux__ |
38b3362d MAL |
1940 | RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, |
1941 | bool share, int fd, | |
1942 | Error **errp) | |
e1c57ab8 PB |
1943 | { |
1944 | RAMBlock *new_block; | |
ef701d7b | 1945 | Error *local_err = NULL; |
8d37b030 | 1946 | int64_t file_size; |
e1c57ab8 PB |
1947 | |
1948 | if (xen_enabled()) { | |
7f56e740 | 1949 | error_setg(errp, "-mem-path not supported with Xen"); |
528f46af | 1950 | return NULL; |
e1c57ab8 PB |
1951 | } |
1952 | ||
e45e7ae2 MAL |
1953 | if (kvm_enabled() && !kvm_has_sync_mmu()) { |
1954 | error_setg(errp, | |
1955 | "host lacks kvm mmu notifiers, -mem-path unsupported"); | |
1956 | return NULL; | |
1957 | } | |
1958 | ||
e1c57ab8 PB |
1959 | if (phys_mem_alloc != qemu_anon_ram_alloc) { |
1960 | /* | |
1961 | * file_ram_alloc() needs to allocate just like | |
1962 | * phys_mem_alloc, but we haven't bothered to provide | |
1963 | * a hook there. | |
1964 | */ | |
7f56e740 PB |
1965 | error_setg(errp, |
1966 | "-mem-path not supported with this accelerator"); | |
528f46af | 1967 | return NULL; |
e1c57ab8 PB |
1968 | } |
1969 | ||
4ed023ce | 1970 | size = HOST_PAGE_ALIGN(size); |
8d37b030 MAL |
1971 | file_size = get_file_size(fd); |
1972 | if (file_size > 0 && file_size < size) { | |
1973 | error_setg(errp, "backing store %s size 0x%" PRIx64 | |
1974 | " does not match 'size' option 0x" RAM_ADDR_FMT, | |
1975 | mem_path, file_size, size); | |
8d37b030 MAL |
1976 | return NULL; |
1977 | } | |
1978 | ||
e1c57ab8 PB |
1979 | new_block = g_malloc0(sizeof(*new_block)); |
1980 | new_block->mr = mr; | |
9b8424d5 MT |
1981 | new_block->used_length = size; |
1982 | new_block->max_length = size; | |
dbcb8981 | 1983 | new_block->flags = share ? RAM_SHARED : 0; |
8d37b030 | 1984 | new_block->host = file_ram_alloc(new_block, size, fd, !file_size, errp); |
7f56e740 PB |
1985 | if (!new_block->host) { |
1986 | g_free(new_block); | |
528f46af | 1987 | return NULL; |
7f56e740 PB |
1988 | } |
1989 | ||
528f46af | 1990 | ram_block_add(new_block, &local_err); |
ef701d7b HT |
1991 | if (local_err) { |
1992 | g_free(new_block); | |
1993 | error_propagate(errp, local_err); | |
528f46af | 1994 | return NULL; |
ef701d7b | 1995 | } |
528f46af | 1996 | return new_block; |
38b3362d MAL |
1997 | |
1998 | } | |
1999 | ||
2000 | ||
2001 | RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, | |
2002 | bool share, const char *mem_path, | |
2003 | Error **errp) | |
2004 | { | |
2005 | int fd; | |
2006 | bool created; | |
2007 | RAMBlock *block; | |
2008 | ||
2009 | fd = file_ram_open(mem_path, memory_region_name(mr), &created, errp); | |
2010 | if (fd < 0) { | |
2011 | return NULL; | |
2012 | } | |
2013 | ||
2014 | block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp); | |
2015 | if (!block) { | |
2016 | if (created) { | |
2017 | unlink(mem_path); | |
2018 | } | |
2019 | close(fd); | |
2020 | return NULL; | |
2021 | } | |
2022 | ||
2023 | return block; | |
e1c57ab8 | 2024 | } |
0b183fc8 | 2025 | #endif |
e1c57ab8 | 2026 | |
62be4e3a | 2027 | static |
528f46af FZ |
2028 | RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size, |
2029 | void (*resized)(const char*, | |
2030 | uint64_t length, | |
2031 | void *host), | |
2032 | void *host, bool resizeable, | |
2033 | MemoryRegion *mr, Error **errp) | |
e1c57ab8 PB |
2034 | { |
2035 | RAMBlock *new_block; | |
ef701d7b | 2036 | Error *local_err = NULL; |
e1c57ab8 | 2037 | |
4ed023ce DDAG |
2038 | size = HOST_PAGE_ALIGN(size); |
2039 | max_size = HOST_PAGE_ALIGN(max_size); | |
e1c57ab8 PB |
2040 | new_block = g_malloc0(sizeof(*new_block)); |
2041 | new_block->mr = mr; | |
62be4e3a | 2042 | new_block->resized = resized; |
9b8424d5 MT |
2043 | new_block->used_length = size; |
2044 | new_block->max_length = max_size; | |
62be4e3a | 2045 | assert(max_size >= size); |
e1c57ab8 | 2046 | new_block->fd = -1; |
863e9621 | 2047 | new_block->page_size = getpagesize(); |
e1c57ab8 PB |
2048 | new_block->host = host; |
2049 | if (host) { | |
7bd4f430 | 2050 | new_block->flags |= RAM_PREALLOC; |
e1c57ab8 | 2051 | } |
62be4e3a MT |
2052 | if (resizeable) { |
2053 | new_block->flags |= RAM_RESIZEABLE; | |
2054 | } | |
528f46af | 2055 | ram_block_add(new_block, &local_err); |
ef701d7b HT |
2056 | if (local_err) { |
2057 | g_free(new_block); | |
2058 | error_propagate(errp, local_err); | |
528f46af | 2059 | return NULL; |
ef701d7b | 2060 | } |
528f46af | 2061 | return new_block; |
e1c57ab8 PB |
2062 | } |
2063 | ||
528f46af | 2064 | RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, |
62be4e3a MT |
2065 | MemoryRegion *mr, Error **errp) |
2066 | { | |
2067 | return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp); | |
2068 | } | |
2069 | ||
528f46af | 2070 | RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp) |
6977dfe6 | 2071 | { |
62be4e3a MT |
2072 | return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp); |
2073 | } | |
2074 | ||
528f46af | 2075 | RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz, |
62be4e3a MT |
2076 | void (*resized)(const char*, |
2077 | uint64_t length, | |
2078 | void *host), | |
2079 | MemoryRegion *mr, Error **errp) | |
2080 | { | |
2081 | return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp); | |
6977dfe6 YT |
2082 | } |
2083 | ||
43771539 PB |
2084 | static void reclaim_ramblock(RAMBlock *block) |
2085 | { | |
2086 | if (block->flags & RAM_PREALLOC) { | |
2087 | ; | |
2088 | } else if (xen_enabled()) { | |
2089 | xen_invalidate_map_cache_entry(block->host); | |
2090 | #ifndef _WIN32 | |
2091 | } else if (block->fd >= 0) { | |
2f3a2bb1 | 2092 | qemu_ram_munmap(block->host, block->max_length); |
43771539 PB |
2093 | close(block->fd); |
2094 | #endif | |
2095 | } else { | |
2096 | qemu_anon_ram_free(block->host, block->max_length); | |
2097 | } | |
2098 | g_free(block); | |
2099 | } | |
2100 | ||
f1060c55 | 2101 | void qemu_ram_free(RAMBlock *block) |
e9a1ab19 | 2102 | { |
85bc2a15 MAL |
2103 | if (!block) { |
2104 | return; | |
2105 | } | |
2106 | ||
0987d735 PB |
2107 | if (block->host) { |
2108 | ram_block_notify_remove(block->host, block->max_length); | |
2109 | } | |
2110 | ||
b2a8658e | 2111 | qemu_mutex_lock_ramlist(); |
f1060c55 FZ |
2112 | QLIST_REMOVE_RCU(block, next); |
2113 | ram_list.mru_block = NULL; | |
2114 | /* Write list before version */ | |
2115 | smp_wmb(); | |
2116 | ram_list.version++; | |
2117 | call_rcu(block, reclaim_ramblock, rcu); | |
b2a8658e | 2118 | qemu_mutex_unlock_ramlist(); |
e9a1ab19 FB |
2119 | } |
2120 | ||
cd19cfa2 HY |
2121 | #ifndef _WIN32 |
2122 | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) | |
2123 | { | |
2124 | RAMBlock *block; | |
2125 | ram_addr_t offset; | |
2126 | int flags; | |
2127 | void *area, *vaddr; | |
2128 | ||
99e15582 | 2129 | RAMBLOCK_FOREACH(block) { |
cd19cfa2 | 2130 | offset = addr - block->offset; |
9b8424d5 | 2131 | if (offset < block->max_length) { |
1240be24 | 2132 | vaddr = ramblock_ptr(block, offset); |
7bd4f430 | 2133 | if (block->flags & RAM_PREALLOC) { |
cd19cfa2 | 2134 | ; |
dfeaf2ab MA |
2135 | } else if (xen_enabled()) { |
2136 | abort(); | |
cd19cfa2 HY |
2137 | } else { |
2138 | flags = MAP_FIXED; | |
3435f395 | 2139 | if (block->fd >= 0) { |
dbcb8981 PB |
2140 | flags |= (block->flags & RAM_SHARED ? |
2141 | MAP_SHARED : MAP_PRIVATE); | |
3435f395 MA |
2142 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, |
2143 | flags, block->fd, offset); | |
cd19cfa2 | 2144 | } else { |
2eb9fbaa MA |
2145 | /* |
2146 | * Remap needs to match alloc. Accelerators that | |
2147 | * set phys_mem_alloc never remap. If they did, | |
2148 | * we'd need a remap hook here. | |
2149 | */ | |
2150 | assert(phys_mem_alloc == qemu_anon_ram_alloc); | |
2151 | ||
cd19cfa2 HY |
2152 | flags |= MAP_PRIVATE | MAP_ANONYMOUS; |
2153 | area = mmap(vaddr, length, PROT_READ | PROT_WRITE, | |
2154 | flags, -1, 0); | |
cd19cfa2 HY |
2155 | } |
2156 | if (area != vaddr) { | |
f15fbc4b AP |
2157 | fprintf(stderr, "Could not remap addr: " |
2158 | RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n", | |
cd19cfa2 HY |
2159 | length, addr); |
2160 | exit(1); | |
2161 | } | |
8490fc78 | 2162 | memory_try_enable_merging(vaddr, length); |
ddb97f1d | 2163 | qemu_ram_setup_dump(vaddr, length); |
cd19cfa2 | 2164 | } |
cd19cfa2 HY |
2165 | } |
2166 | } | |
2167 | } | |
2168 | #endif /* !_WIN32 */ | |
2169 | ||
1b5ec234 | 2170 | /* Return a host pointer to ram allocated with qemu_ram_alloc. |
ae3a7047 MD |
2171 | * This should not be used for general purpose DMA. Use address_space_map |
2172 | * or address_space_rw instead. For local memory (e.g. video ram) that the | |
2173 | * device owns, use memory_region_get_ram_ptr. | |
0dc3f44a | 2174 | * |
49b24afc | 2175 | * Called within RCU critical section. |
1b5ec234 | 2176 | */ |
0878d0e1 | 2177 | void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr) |
1b5ec234 | 2178 | { |
3655cb9c GA |
2179 | RAMBlock *block = ram_block; |
2180 | ||
2181 | if (block == NULL) { | |
2182 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 2183 | addr -= block->offset; |
3655cb9c | 2184 | } |
ae3a7047 MD |
2185 | |
2186 | if (xen_enabled() && block->host == NULL) { | |
0d6d3c87 PB |
2187 | /* We need to check if the requested address is in the RAM |
2188 | * because we don't want to map the entire memory in QEMU. | |
2189 | * In that case just map until the end of the page. | |
2190 | */ | |
2191 | if (block->offset == 0) { | |
1ff7c598 | 2192 | return xen_map_cache(addr, 0, 0, false); |
0d6d3c87 | 2193 | } |
ae3a7047 | 2194 | |
1ff7c598 | 2195 | block->host = xen_map_cache(block->offset, block->max_length, 1, false); |
0d6d3c87 | 2196 | } |
0878d0e1 | 2197 | return ramblock_ptr(block, addr); |
dc828ca1 PB |
2198 | } |
2199 | ||
0878d0e1 | 2200 | /* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr |
ae3a7047 | 2201 | * but takes a size argument. |
0dc3f44a | 2202 | * |
e81bcda5 | 2203 | * Called within RCU critical section. |
ae3a7047 | 2204 | */ |
3655cb9c GA |
2205 | static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr, |
2206 | hwaddr *size) | |
38bee5dc | 2207 | { |
3655cb9c | 2208 | RAMBlock *block = ram_block; |
8ab934f9 SS |
2209 | if (*size == 0) { |
2210 | return NULL; | |
2211 | } | |
e81bcda5 | 2212 | |
3655cb9c GA |
2213 | if (block == NULL) { |
2214 | block = qemu_get_ram_block(addr); | |
0878d0e1 | 2215 | addr -= block->offset; |
3655cb9c | 2216 | } |
0878d0e1 | 2217 | *size = MIN(*size, block->max_length - addr); |
e81bcda5 PB |
2218 | |
2219 | if (xen_enabled() && block->host == NULL) { | |
2220 | /* We need to check if the requested address is in the RAM | |
2221 | * because we don't want to map the entire memory in QEMU. | |
2222 | * In that case just map the requested area. | |
2223 | */ | |
2224 | if (block->offset == 0) { | |
1ff7c598 | 2225 | return xen_map_cache(addr, *size, 1, true); |
38bee5dc SS |
2226 | } |
2227 | ||
1ff7c598 | 2228 | block->host = xen_map_cache(block->offset, block->max_length, 1, true); |
38bee5dc | 2229 | } |
e81bcda5 | 2230 | |
0878d0e1 | 2231 | return ramblock_ptr(block, addr); |
38bee5dc SS |
2232 | } |
2233 | ||
422148d3 DDAG |
2234 | /* |
2235 | * Translates a host ptr back to a RAMBlock, a ram_addr and an offset | |
2236 | * in that RAMBlock. | |
2237 | * | |
2238 | * ptr: Host pointer to look up | |
2239 | * round_offset: If true round the result offset down to a page boundary | |
2240 | * *ram_addr: set to result ram_addr | |
2241 | * *offset: set to result offset within the RAMBlock | |
2242 | * | |
2243 | * Returns: RAMBlock (or NULL if not found) | |
ae3a7047 MD |
2244 | * |
2245 | * By the time this function returns, the returned pointer is not protected | |
2246 | * by RCU anymore. If the caller is not within an RCU critical section and | |
2247 | * does not hold the iothread lock, it must have other means of protecting the | |
2248 | * pointer, such as a reference to the region that includes the incoming | |
2249 | * ram_addr_t. | |
2250 | */ | |
422148d3 | 2251 | RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, |
422148d3 | 2252 | ram_addr_t *offset) |
5579c7f3 | 2253 | { |
94a6b54f PB |
2254 | RAMBlock *block; |
2255 | uint8_t *host = ptr; | |
2256 | ||
868bb33f | 2257 | if (xen_enabled()) { |
f615f396 | 2258 | ram_addr_t ram_addr; |
0dc3f44a | 2259 | rcu_read_lock(); |
f615f396 PB |
2260 | ram_addr = xen_ram_addr_from_mapcache(ptr); |
2261 | block = qemu_get_ram_block(ram_addr); | |
422148d3 | 2262 | if (block) { |
d6b6aec4 | 2263 | *offset = ram_addr - block->offset; |
422148d3 | 2264 | } |
0dc3f44a | 2265 | rcu_read_unlock(); |
422148d3 | 2266 | return block; |
712c2b41 SS |
2267 | } |
2268 | ||
0dc3f44a MD |
2269 | rcu_read_lock(); |
2270 | block = atomic_rcu_read(&ram_list.mru_block); | |
9b8424d5 | 2271 | if (block && block->host && host - block->host < block->max_length) { |
23887b79 PB |
2272 | goto found; |
2273 | } | |
2274 | ||
99e15582 | 2275 | RAMBLOCK_FOREACH(block) { |
432d268c JN |
2276 | /* This case append when the block is not mapped. */ |
2277 | if (block->host == NULL) { | |
2278 | continue; | |
2279 | } | |
9b8424d5 | 2280 | if (host - block->host < block->max_length) { |
23887b79 | 2281 | goto found; |
f471a17e | 2282 | } |
94a6b54f | 2283 | } |
432d268c | 2284 | |
0dc3f44a | 2285 | rcu_read_unlock(); |
1b5ec234 | 2286 | return NULL; |
23887b79 PB |
2287 | |
2288 | found: | |
422148d3 DDAG |
2289 | *offset = (host - block->host); |
2290 | if (round_offset) { | |
2291 | *offset &= TARGET_PAGE_MASK; | |
2292 | } | |
0dc3f44a | 2293 | rcu_read_unlock(); |
422148d3 DDAG |
2294 | return block; |
2295 | } | |
2296 | ||
e3dd7493 DDAG |
2297 | /* |
2298 | * Finds the named RAMBlock | |
2299 | * | |
2300 | * name: The name of RAMBlock to find | |
2301 | * | |
2302 | * Returns: RAMBlock (or NULL if not found) | |
2303 | */ | |
2304 | RAMBlock *qemu_ram_block_by_name(const char *name) | |
2305 | { | |
2306 | RAMBlock *block; | |
2307 | ||
99e15582 | 2308 | RAMBLOCK_FOREACH(block) { |
e3dd7493 DDAG |
2309 | if (!strcmp(name, block->idstr)) { |
2310 | return block; | |
2311 | } | |
2312 | } | |
2313 | ||
2314 | return NULL; | |
2315 | } | |
2316 | ||
422148d3 DDAG |
2317 | /* Some of the softmmu routines need to translate from a host pointer |
2318 | (typically a TLB entry) back to a ram offset. */ | |
07bdaa41 | 2319 | ram_addr_t qemu_ram_addr_from_host(void *ptr) |
422148d3 DDAG |
2320 | { |
2321 | RAMBlock *block; | |
f615f396 | 2322 | ram_addr_t offset; |
422148d3 | 2323 | |
f615f396 | 2324 | block = qemu_ram_block_from_host(ptr, false, &offset); |
422148d3 | 2325 | if (!block) { |
07bdaa41 | 2326 | return RAM_ADDR_INVALID; |
422148d3 DDAG |
2327 | } |
2328 | ||
07bdaa41 | 2329 | return block->offset + offset; |
e890261f | 2330 | } |
f471a17e | 2331 | |
49b24afc | 2332 | /* Called within RCU critical section. */ |
a8170e5e | 2333 | static void notdirty_mem_write(void *opaque, hwaddr ram_addr, |
0e0df1e2 | 2334 | uint64_t val, unsigned size) |
9fa3e853 | 2335 | { |
ba051fb5 AB |
2336 | bool locked = false; |
2337 | ||
5aa1ef71 | 2338 | assert(tcg_enabled()); |
52159192 | 2339 | if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { |
ba051fb5 AB |
2340 | locked = true; |
2341 | tb_lock(); | |
0e0df1e2 | 2342 | tb_invalidate_phys_page_fast(ram_addr, size); |
3a7d929e | 2343 | } |
0e0df1e2 AK |
2344 | switch (size) { |
2345 | case 1: | |
0878d0e1 | 2346 | stb_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2347 | break; |
2348 | case 2: | |
0878d0e1 | 2349 | stw_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2350 | break; |
2351 | case 4: | |
0878d0e1 | 2352 | stl_p(qemu_map_ram_ptr(NULL, ram_addr), val); |
0e0df1e2 AK |
2353 | break; |
2354 | default: | |
2355 | abort(); | |
3a7d929e | 2356 | } |
ba051fb5 AB |
2357 | |
2358 | if (locked) { | |
2359 | tb_unlock(); | |
2360 | } | |
2361 | ||
58d2707e PB |
2362 | /* Set both VGA and migration bits for simplicity and to remove |
2363 | * the notdirty callback faster. | |
2364 | */ | |
2365 | cpu_physical_memory_set_dirty_range(ram_addr, size, | |
2366 | DIRTY_CLIENTS_NOCODE); | |
f23db169 FB |
2367 | /* we remove the notdirty callback only if the code has been |
2368 | flushed */ | |
a2cd8c85 | 2369 | if (!cpu_physical_memory_is_clean(ram_addr)) { |
bcae01e4 | 2370 | tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr); |
4917cf44 | 2371 | } |
9fa3e853 FB |
2372 | } |
2373 | ||
b018ddf6 PB |
2374 | static bool notdirty_mem_accepts(void *opaque, hwaddr addr, |
2375 | unsigned size, bool is_write) | |
2376 | { | |
2377 | return is_write; | |
2378 | } | |
2379 | ||
0e0df1e2 | 2380 | static const MemoryRegionOps notdirty_mem_ops = { |
0e0df1e2 | 2381 | .write = notdirty_mem_write, |
b018ddf6 | 2382 | .valid.accepts = notdirty_mem_accepts, |
0e0df1e2 | 2383 | .endianness = DEVICE_NATIVE_ENDIAN, |
1ccde1cb FB |
2384 | }; |
2385 | ||
0f459d16 | 2386 | /* Generate a debug exception if a watchpoint has been hit. */ |
66b9b43c | 2387 | static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags) |
0f459d16 | 2388 | { |
93afeade | 2389 | CPUState *cpu = current_cpu; |
568496c0 | 2390 | CPUClass *cc = CPU_GET_CLASS(cpu); |
93afeade | 2391 | CPUArchState *env = cpu->env_ptr; |
06d55cc1 | 2392 | target_ulong pc, cs_base; |
0f459d16 | 2393 | target_ulong vaddr; |
a1d1bb31 | 2394 | CPUWatchpoint *wp; |
89fee74a | 2395 | uint32_t cpu_flags; |
0f459d16 | 2396 | |
5aa1ef71 | 2397 | assert(tcg_enabled()); |
ff4700b0 | 2398 | if (cpu->watchpoint_hit) { |
06d55cc1 AL |
2399 | /* We re-entered the check after replacing the TB. Now raise |
2400 | * the debug interrupt so that is will trigger after the | |
2401 | * current instruction. */ | |
93afeade | 2402 | cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); |
06d55cc1 AL |
2403 | return; |
2404 | } | |
93afeade | 2405 | vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset; |
40612000 | 2406 | vaddr = cc->adjust_watchpoint_address(cpu, vaddr, len); |
ff4700b0 | 2407 | QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) { |
05068c0d PM |
2408 | if (cpu_watchpoint_address_matches(wp, vaddr, len) |
2409 | && (wp->flags & flags)) { | |
08225676 PM |
2410 | if (flags == BP_MEM_READ) { |
2411 | wp->flags |= BP_WATCHPOINT_HIT_READ; | |
2412 | } else { | |
2413 | wp->flags |= BP_WATCHPOINT_HIT_WRITE; | |
2414 | } | |
2415 | wp->hitaddr = vaddr; | |
66b9b43c | 2416 | wp->hitattrs = attrs; |
ff4700b0 | 2417 | if (!cpu->watchpoint_hit) { |
568496c0 SF |
2418 | if (wp->flags & BP_CPU && |
2419 | !cc->debug_check_watchpoint(cpu, wp)) { | |
2420 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
2421 | continue; | |
2422 | } | |
ff4700b0 | 2423 | cpu->watchpoint_hit = wp; |
a5e99826 | 2424 | |
8d04fb55 JK |
2425 | /* Both tb_lock and iothread_mutex will be reset when |
2426 | * cpu_loop_exit or cpu_loop_exit_noexc longjmp | |
2427 | * back into the cpu_exec main loop. | |
a5e99826 FK |
2428 | */ |
2429 | tb_lock(); | |
239c51a5 | 2430 | tb_check_watchpoint(cpu); |
6e140f28 | 2431 | if (wp->flags & BP_STOP_BEFORE_ACCESS) { |
27103424 | 2432 | cpu->exception_index = EXCP_DEBUG; |
5638d180 | 2433 | cpu_loop_exit(cpu); |
6e140f28 AL |
2434 | } else { |
2435 | cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags); | |
648f034c | 2436 | tb_gen_code(cpu, pc, cs_base, cpu_flags, 1); |
6886b980 | 2437 | cpu_loop_exit_noexc(cpu); |
6e140f28 | 2438 | } |
06d55cc1 | 2439 | } |
6e140f28 AL |
2440 | } else { |
2441 | wp->flags &= ~BP_WATCHPOINT_HIT; | |
0f459d16 PB |
2442 | } |
2443 | } | |
2444 | } | |
2445 | ||
6658ffb8 PB |
2446 | /* Watchpoint access routines. Watchpoints are inserted using TLB tricks, |
2447 | so these check for a hit then pass through to the normal out-of-line | |
2448 | phys routines. */ | |
66b9b43c PM |
2449 | static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata, |
2450 | unsigned size, MemTxAttrs attrs) | |
6658ffb8 | 2451 | { |
66b9b43c PM |
2452 | MemTxResult res; |
2453 | uint64_t data; | |
79ed0416 PM |
2454 | int asidx = cpu_asidx_from_attrs(current_cpu, attrs); |
2455 | AddressSpace *as = current_cpu->cpu_ases[asidx].as; | |
66b9b43c PM |
2456 | |
2457 | check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ); | |
1ec9b909 | 2458 | switch (size) { |
66b9b43c | 2459 | case 1: |
79ed0416 | 2460 | data = address_space_ldub(as, addr, attrs, &res); |
66b9b43c PM |
2461 | break; |
2462 | case 2: | |
79ed0416 | 2463 | data = address_space_lduw(as, addr, attrs, &res); |
66b9b43c PM |
2464 | break; |
2465 | case 4: | |
79ed0416 | 2466 | data = address_space_ldl(as, addr, attrs, &res); |
66b9b43c | 2467 | break; |
1ec9b909 AK |
2468 | default: abort(); |
2469 | } | |
66b9b43c PM |
2470 | *pdata = data; |
2471 | return res; | |
6658ffb8 PB |
2472 | } |
2473 | ||
66b9b43c PM |
2474 | static MemTxResult watch_mem_write(void *opaque, hwaddr addr, |
2475 | uint64_t val, unsigned size, | |
2476 | MemTxAttrs attrs) | |
6658ffb8 | 2477 | { |
66b9b43c | 2478 | MemTxResult res; |
79ed0416 PM |
2479 | int asidx = cpu_asidx_from_attrs(current_cpu, attrs); |
2480 | AddressSpace *as = current_cpu->cpu_ases[asidx].as; | |
66b9b43c PM |
2481 | |
2482 | check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE); | |
1ec9b909 | 2483 | switch (size) { |
67364150 | 2484 | case 1: |
79ed0416 | 2485 | address_space_stb(as, addr, val, attrs, &res); |
67364150 MF |
2486 | break; |
2487 | case 2: | |
79ed0416 | 2488 | address_space_stw(as, addr, val, attrs, &res); |
67364150 MF |
2489 | break; |
2490 | case 4: | |
79ed0416 | 2491 | address_space_stl(as, addr, val, attrs, &res); |
67364150 | 2492 | break; |
1ec9b909 AK |
2493 | default: abort(); |
2494 | } | |
66b9b43c | 2495 | return res; |
6658ffb8 PB |
2496 | } |
2497 | ||
1ec9b909 | 2498 | static const MemoryRegionOps watch_mem_ops = { |
66b9b43c PM |
2499 | .read_with_attrs = watch_mem_read, |
2500 | .write_with_attrs = watch_mem_write, | |
1ec9b909 | 2501 | .endianness = DEVICE_NATIVE_ENDIAN, |
6658ffb8 | 2502 | }; |
6658ffb8 | 2503 | |
f25a49e0 PM |
2504 | static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data, |
2505 | unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2506 | { |
acc9d80b | 2507 | subpage_t *subpage = opaque; |
ff6cff75 | 2508 | uint8_t buf[8]; |
5c9eb028 | 2509 | MemTxResult res; |
791af8c8 | 2510 | |
db7b5426 | 2511 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2512 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__, |
acc9d80b | 2513 | subpage, len, addr); |
db7b5426 | 2514 | #endif |
5c9eb028 PM |
2515 | res = address_space_read(subpage->as, addr + subpage->base, |
2516 | attrs, buf, len); | |
2517 | if (res) { | |
2518 | return res; | |
f25a49e0 | 2519 | } |
acc9d80b JK |
2520 | switch (len) { |
2521 | case 1: | |
f25a49e0 PM |
2522 | *data = ldub_p(buf); |
2523 | return MEMTX_OK; | |
acc9d80b | 2524 | case 2: |
f25a49e0 PM |
2525 | *data = lduw_p(buf); |
2526 | return MEMTX_OK; | |
acc9d80b | 2527 | case 4: |
f25a49e0 PM |
2528 | *data = ldl_p(buf); |
2529 | return MEMTX_OK; | |
ff6cff75 | 2530 | case 8: |
f25a49e0 PM |
2531 | *data = ldq_p(buf); |
2532 | return MEMTX_OK; | |
acc9d80b JK |
2533 | default: |
2534 | abort(); | |
2535 | } | |
db7b5426 BS |
2536 | } |
2537 | ||
f25a49e0 PM |
2538 | static MemTxResult subpage_write(void *opaque, hwaddr addr, |
2539 | uint64_t value, unsigned len, MemTxAttrs attrs) | |
db7b5426 | 2540 | { |
acc9d80b | 2541 | subpage_t *subpage = opaque; |
ff6cff75 | 2542 | uint8_t buf[8]; |
acc9d80b | 2543 | |
db7b5426 | 2544 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2545 | printf("%s: subpage %p len %u addr " TARGET_FMT_plx |
acc9d80b JK |
2546 | " value %"PRIx64"\n", |
2547 | __func__, subpage, len, addr, value); | |
db7b5426 | 2548 | #endif |
acc9d80b JK |
2549 | switch (len) { |
2550 | case 1: | |
2551 | stb_p(buf, value); | |
2552 | break; | |
2553 | case 2: | |
2554 | stw_p(buf, value); | |
2555 | break; | |
2556 | case 4: | |
2557 | stl_p(buf, value); | |
2558 | break; | |
ff6cff75 PB |
2559 | case 8: |
2560 | stq_p(buf, value); | |
2561 | break; | |
acc9d80b JK |
2562 | default: |
2563 | abort(); | |
2564 | } | |
5c9eb028 PM |
2565 | return address_space_write(subpage->as, addr + subpage->base, |
2566 | attrs, buf, len); | |
db7b5426 BS |
2567 | } |
2568 | ||
c353e4cc | 2569 | static bool subpage_accepts(void *opaque, hwaddr addr, |
016e9d62 | 2570 | unsigned len, bool is_write) |
c353e4cc | 2571 | { |
acc9d80b | 2572 | subpage_t *subpage = opaque; |
c353e4cc | 2573 | #if defined(DEBUG_SUBPAGE) |
016e9d62 | 2574 | printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n", |
acc9d80b | 2575 | __func__, subpage, is_write ? 'w' : 'r', len, addr); |
c353e4cc PB |
2576 | #endif |
2577 | ||
acc9d80b | 2578 | return address_space_access_valid(subpage->as, addr + subpage->base, |
016e9d62 | 2579 | len, is_write); |
c353e4cc PB |
2580 | } |
2581 | ||
70c68e44 | 2582 | static const MemoryRegionOps subpage_ops = { |
f25a49e0 PM |
2583 | .read_with_attrs = subpage_read, |
2584 | .write_with_attrs = subpage_write, | |
ff6cff75 PB |
2585 | .impl.min_access_size = 1, |
2586 | .impl.max_access_size = 8, | |
2587 | .valid.min_access_size = 1, | |
2588 | .valid.max_access_size = 8, | |
c353e4cc | 2589 | .valid.accepts = subpage_accepts, |
70c68e44 | 2590 | .endianness = DEVICE_NATIVE_ENDIAN, |
db7b5426 BS |
2591 | }; |
2592 | ||
c227f099 | 2593 | static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, |
5312bd8b | 2594 | uint16_t section) |
db7b5426 BS |
2595 | { |
2596 | int idx, eidx; | |
2597 | ||
2598 | if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) | |
2599 | return -1; | |
2600 | idx = SUBPAGE_IDX(start); | |
2601 | eidx = SUBPAGE_IDX(end); | |
2602 | #if defined(DEBUG_SUBPAGE) | |
016e9d62 AK |
2603 | printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", |
2604 | __func__, mmio, start, end, idx, eidx, section); | |
db7b5426 | 2605 | #endif |
db7b5426 | 2606 | for (; idx <= eidx; idx++) { |
5312bd8b | 2607 | mmio->sub_section[idx] = section; |
db7b5426 BS |
2608 | } |
2609 | ||
2610 | return 0; | |
2611 | } | |
2612 | ||
acc9d80b | 2613 | static subpage_t *subpage_init(AddressSpace *as, hwaddr base) |
db7b5426 | 2614 | { |
c227f099 | 2615 | subpage_t *mmio; |
db7b5426 | 2616 | |
2615fabd | 2617 | mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t)); |
acc9d80b | 2618 | mmio->as = as; |
1eec614b | 2619 | mmio->base = base; |
2c9b15ca | 2620 | memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio, |
b4fefef9 | 2621 | NULL, TARGET_PAGE_SIZE); |
b3b00c78 | 2622 | mmio->iomem.subpage = true; |
db7b5426 | 2623 | #if defined(DEBUG_SUBPAGE) |
016e9d62 AK |
2624 | printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__, |
2625 | mmio, base, TARGET_PAGE_SIZE); | |
db7b5426 | 2626 | #endif |
b41aac4f | 2627 | subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED); |
db7b5426 BS |
2628 | |
2629 | return mmio; | |
2630 | } | |
2631 | ||
a656e22f PC |
2632 | static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as, |
2633 | MemoryRegion *mr) | |
5312bd8b | 2634 | { |
a656e22f | 2635 | assert(as); |
5312bd8b | 2636 | MemoryRegionSection section = { |
a656e22f | 2637 | .address_space = as, |
5312bd8b AK |
2638 | .mr = mr, |
2639 | .offset_within_address_space = 0, | |
2640 | .offset_within_region = 0, | |
052e87b0 | 2641 | .size = int128_2_64(), |
5312bd8b AK |
2642 | }; |
2643 | ||
53cb28cb | 2644 | return phys_section_add(map, §ion); |
5312bd8b AK |
2645 | } |
2646 | ||
a54c87b6 | 2647 | MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs) |
aa102231 | 2648 | { |
a54c87b6 PM |
2649 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
2650 | CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; | |
32857f4d | 2651 | AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch); |
79e2b9ae | 2652 | MemoryRegionSection *sections = d->map.sections; |
9d82b5a7 PB |
2653 | |
2654 | return sections[index & ~TARGET_PAGE_MASK].mr; | |
aa102231 AK |
2655 | } |
2656 | ||
e9179ce1 AK |
2657 | static void io_mem_init(void) |
2658 | { | |
1f6245e5 | 2659 | memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX); |
2c9b15ca | 2660 | memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL, |
1f6245e5 | 2661 | NULL, UINT64_MAX); |
8d04fb55 JK |
2662 | |
2663 | /* io_mem_notdirty calls tb_invalidate_phys_page_fast, | |
2664 | * which can be called without the iothread mutex. | |
2665 | */ | |
2c9b15ca | 2666 | memory_region_init_io(&io_mem_notdirty, NULL, ¬dirty_mem_ops, NULL, |
1f6245e5 | 2667 | NULL, UINT64_MAX); |
8d04fb55 JK |
2668 | memory_region_clear_global_locking(&io_mem_notdirty); |
2669 | ||
2c9b15ca | 2670 | memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL, |
1f6245e5 | 2671 | NULL, UINT64_MAX); |
e9179ce1 AK |
2672 | } |
2673 | ||
ac1970fb | 2674 | static void mem_begin(MemoryListener *listener) |
00752703 PB |
2675 | { |
2676 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); | |
53cb28cb MA |
2677 | AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1); |
2678 | uint16_t n; | |
2679 | ||
a656e22f | 2680 | n = dummy_section(&d->map, as, &io_mem_unassigned); |
53cb28cb | 2681 | assert(n == PHYS_SECTION_UNASSIGNED); |
a656e22f | 2682 | n = dummy_section(&d->map, as, &io_mem_notdirty); |
53cb28cb | 2683 | assert(n == PHYS_SECTION_NOTDIRTY); |
a656e22f | 2684 | n = dummy_section(&d->map, as, &io_mem_rom); |
53cb28cb | 2685 | assert(n == PHYS_SECTION_ROM); |
a656e22f | 2686 | n = dummy_section(&d->map, as, &io_mem_watch); |
53cb28cb | 2687 | assert(n == PHYS_SECTION_WATCH); |
00752703 | 2688 | |
9736e55b | 2689 | d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 }; |
00752703 PB |
2690 | d->as = as; |
2691 | as->next_dispatch = d; | |
2692 | } | |
2693 | ||
79e2b9ae PB |
2694 | static void address_space_dispatch_free(AddressSpaceDispatch *d) |
2695 | { | |
2696 | phys_sections_free(&d->map); | |
2697 | g_free(d); | |
2698 | } | |
2699 | ||
00752703 | 2700 | static void mem_commit(MemoryListener *listener) |
ac1970fb | 2701 | { |
89ae337a | 2702 | AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener); |
0475d94f PB |
2703 | AddressSpaceDispatch *cur = as->dispatch; |
2704 | AddressSpaceDispatch *next = as->next_dispatch; | |
2705 | ||
53cb28cb | 2706 | phys_page_compact_all(next, next->map.nodes_nb); |
b35ba30f | 2707 | |
79e2b9ae | 2708 | atomic_rcu_set(&as->dispatch, next); |
53cb28cb | 2709 | if (cur) { |
79e2b9ae | 2710 | call_rcu(cur, address_space_dispatch_free, rcu); |
53cb28cb | 2711 | } |
9affd6fc PB |
2712 | } |
2713 | ||
1d71148e | 2714 | static void tcg_commit(MemoryListener *listener) |
50c1e149 | 2715 | { |
32857f4d PM |
2716 | CPUAddressSpace *cpuas; |
2717 | AddressSpaceDispatch *d; | |
117712c3 AK |
2718 | |
2719 | /* since each CPU stores ram addresses in its TLB cache, we must | |
2720 | reset the modified entries */ | |
32857f4d PM |
2721 | cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); |
2722 | cpu_reloading_memory_map(); | |
2723 | /* The CPU and TLB are protected by the iothread lock. | |
2724 | * We reload the dispatch pointer now because cpu_reloading_memory_map() | |
2725 | * may have split the RCU critical section. | |
2726 | */ | |
2727 | d = atomic_rcu_read(&cpuas->as->dispatch); | |
f35e44e7 | 2728 | atomic_rcu_set(&cpuas->memory_dispatch, d); |
d10eb08f | 2729 | tlb_flush(cpuas->cpu); |
50c1e149 AK |
2730 | } |
2731 | ||
ac1970fb AK |
2732 | void address_space_init_dispatch(AddressSpace *as) |
2733 | { | |
00752703 | 2734 | as->dispatch = NULL; |
89ae337a | 2735 | as->dispatch_listener = (MemoryListener) { |
ac1970fb | 2736 | .begin = mem_begin, |
00752703 | 2737 | .commit = mem_commit, |
ac1970fb AK |
2738 | .region_add = mem_add, |
2739 | .region_nop = mem_add, | |
2740 | .priority = 0, | |
2741 | }; | |
89ae337a | 2742 | memory_listener_register(&as->dispatch_listener, as); |
ac1970fb AK |
2743 | } |
2744 | ||
6e48e8f9 PB |
2745 | void address_space_unregister(AddressSpace *as) |
2746 | { | |
2747 | memory_listener_unregister(&as->dispatch_listener); | |
2748 | } | |
2749 | ||
83f3c251 AK |
2750 | void address_space_destroy_dispatch(AddressSpace *as) |
2751 | { | |
2752 | AddressSpaceDispatch *d = as->dispatch; | |
2753 | ||
79e2b9ae PB |
2754 | atomic_rcu_set(&as->dispatch, NULL); |
2755 | if (d) { | |
2756 | call_rcu(d, address_space_dispatch_free, rcu); | |
2757 | } | |
83f3c251 AK |
2758 | } |
2759 | ||
62152b8a AK |
2760 | static void memory_map_init(void) |
2761 | { | |
7267c094 | 2762 | system_memory = g_malloc(sizeof(*system_memory)); |
03f49957 | 2763 | |
57271d63 | 2764 | memory_region_init(system_memory, NULL, "system", UINT64_MAX); |
7dca8043 | 2765 | address_space_init(&address_space_memory, system_memory, "memory"); |
309cb471 | 2766 | |
7267c094 | 2767 | system_io = g_malloc(sizeof(*system_io)); |
3bb28b72 JK |
2768 | memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io", |
2769 | 65536); | |
7dca8043 | 2770 | address_space_init(&address_space_io, system_io, "I/O"); |
62152b8a AK |
2771 | } |
2772 | ||
2773 | MemoryRegion *get_system_memory(void) | |
2774 | { | |
2775 | return system_memory; | |
2776 | } | |
2777 | ||
309cb471 AK |
2778 | MemoryRegion *get_system_io(void) |
2779 | { | |
2780 | return system_io; | |
2781 | } | |
2782 | ||
e2eef170 PB |
2783 | #endif /* !defined(CONFIG_USER_ONLY) */ |
2784 | ||
13eb76e0 FB |
2785 | /* physical memory access (slow version, mainly for debug) */ |
2786 | #if defined(CONFIG_USER_ONLY) | |
f17ec444 | 2787 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
a68fe89c | 2788 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
2789 | { |
2790 | int l, flags; | |
2791 | target_ulong page; | |
53a5960a | 2792 | void * p; |
13eb76e0 FB |
2793 | |
2794 | while (len > 0) { | |
2795 | page = addr & TARGET_PAGE_MASK; | |
2796 | l = (page + TARGET_PAGE_SIZE) - addr; | |
2797 | if (l > len) | |
2798 | l = len; | |
2799 | flags = page_get_flags(page); | |
2800 | if (!(flags & PAGE_VALID)) | |
a68fe89c | 2801 | return -1; |
13eb76e0 FB |
2802 | if (is_write) { |
2803 | if (!(flags & PAGE_WRITE)) | |
a68fe89c | 2804 | return -1; |
579a97f7 | 2805 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 2806 | if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) |
a68fe89c | 2807 | return -1; |
72fb7daa AJ |
2808 | memcpy(p, buf, l); |
2809 | unlock_user(p, addr, l); | |
13eb76e0 FB |
2810 | } else { |
2811 | if (!(flags & PAGE_READ)) | |
a68fe89c | 2812 | return -1; |
579a97f7 | 2813 | /* XXX: this code should not depend on lock_user */ |
72fb7daa | 2814 | if (!(p = lock_user(VERIFY_READ, addr, l, 1))) |
a68fe89c | 2815 | return -1; |
72fb7daa | 2816 | memcpy(buf, p, l); |
5b257578 | 2817 | unlock_user(p, addr, 0); |
13eb76e0 FB |
2818 | } |
2819 | len -= l; | |
2820 | buf += l; | |
2821 | addr += l; | |
2822 | } | |
a68fe89c | 2823 | return 0; |
13eb76e0 | 2824 | } |
8df1cd07 | 2825 | |
13eb76e0 | 2826 | #else |
51d7a9eb | 2827 | |
845b6214 | 2828 | static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, |
a8170e5e | 2829 | hwaddr length) |
51d7a9eb | 2830 | { |
e87f7778 | 2831 | uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr); |
0878d0e1 PB |
2832 | addr += memory_region_get_ram_addr(mr); |
2833 | ||
e87f7778 PB |
2834 | /* No early return if dirty_log_mask is or becomes 0, because |
2835 | * cpu_physical_memory_set_dirty_range will still call | |
2836 | * xen_modified_memory. | |
2837 | */ | |
2838 | if (dirty_log_mask) { | |
2839 | dirty_log_mask = | |
2840 | cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); | |
2841 | } | |
2842 | if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { | |
5aa1ef71 | 2843 | assert(tcg_enabled()); |
ba051fb5 | 2844 | tb_lock(); |
e87f7778 | 2845 | tb_invalidate_phys_range(addr, addr + length); |
ba051fb5 | 2846 | tb_unlock(); |
e87f7778 | 2847 | dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); |
51d7a9eb | 2848 | } |
e87f7778 | 2849 | cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); |
51d7a9eb AP |
2850 | } |
2851 | ||
23326164 | 2852 | static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr) |
82f2563f | 2853 | { |
e1622f4b | 2854 | unsigned access_size_max = mr->ops->valid.max_access_size; |
23326164 RH |
2855 | |
2856 | /* Regions are assumed to support 1-4 byte accesses unless | |
2857 | otherwise specified. */ | |
23326164 RH |
2858 | if (access_size_max == 0) { |
2859 | access_size_max = 4; | |
2860 | } | |
2861 | ||
2862 | /* Bound the maximum access by the alignment of the address. */ | |
2863 | if (!mr->ops->impl.unaligned) { | |
2864 | unsigned align_size_max = addr & -addr; | |
2865 | if (align_size_max != 0 && align_size_max < access_size_max) { | |
2866 | access_size_max = align_size_max; | |
2867 | } | |
82f2563f | 2868 | } |
23326164 RH |
2869 | |
2870 | /* Don't attempt accesses larger than the maximum. */ | |
2871 | if (l > access_size_max) { | |
2872 | l = access_size_max; | |
82f2563f | 2873 | } |
6554f5c0 | 2874 | l = pow2floor(l); |
23326164 RH |
2875 | |
2876 | return l; | |
82f2563f PB |
2877 | } |
2878 | ||
4840f10e | 2879 | static bool prepare_mmio_access(MemoryRegion *mr) |
125b3806 | 2880 | { |
4840f10e JK |
2881 | bool unlocked = !qemu_mutex_iothread_locked(); |
2882 | bool release_lock = false; | |
2883 | ||
2884 | if (unlocked && mr->global_locking) { | |
2885 | qemu_mutex_lock_iothread(); | |
2886 | unlocked = false; | |
2887 | release_lock = true; | |
2888 | } | |
125b3806 | 2889 | if (mr->flush_coalesced_mmio) { |
4840f10e JK |
2890 | if (unlocked) { |
2891 | qemu_mutex_lock_iothread(); | |
2892 | } | |
125b3806 | 2893 | qemu_flush_coalesced_mmio_buffer(); |
4840f10e JK |
2894 | if (unlocked) { |
2895 | qemu_mutex_unlock_iothread(); | |
2896 | } | |
125b3806 | 2897 | } |
4840f10e JK |
2898 | |
2899 | return release_lock; | |
125b3806 PB |
2900 | } |
2901 | ||
a203ac70 PB |
2902 | /* Called within RCU critical section. */ |
2903 | static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr, | |
2904 | MemTxAttrs attrs, | |
2905 | const uint8_t *buf, | |
2906 | int len, hwaddr addr1, | |
2907 | hwaddr l, MemoryRegion *mr) | |
13eb76e0 | 2908 | { |
13eb76e0 | 2909 | uint8_t *ptr; |
791af8c8 | 2910 | uint64_t val; |
3b643495 | 2911 | MemTxResult result = MEMTX_OK; |
4840f10e | 2912 | bool release_lock = false; |
3b46e624 | 2913 | |
a203ac70 | 2914 | for (;;) { |
eb7eeb88 PB |
2915 | if (!memory_access_is_direct(mr, true)) { |
2916 | release_lock |= prepare_mmio_access(mr); | |
2917 | l = memory_access_size(mr, l, addr1); | |
2918 | /* XXX: could force current_cpu to NULL to avoid | |
2919 | potential bugs */ | |
2920 | switch (l) { | |
2921 | case 8: | |
2922 | /* 64 bit write access */ | |
2923 | val = ldq_p(buf); | |
2924 | result |= memory_region_dispatch_write(mr, addr1, val, 8, | |
2925 | attrs); | |
2926 | break; | |
2927 | case 4: | |
2928 | /* 32 bit write access */ | |
6da67de6 | 2929 | val = (uint32_t)ldl_p(buf); |
eb7eeb88 PB |
2930 | result |= memory_region_dispatch_write(mr, addr1, val, 4, |
2931 | attrs); | |
2932 | break; | |
2933 | case 2: | |
2934 | /* 16 bit write access */ | |
2935 | val = lduw_p(buf); | |
2936 | result |= memory_region_dispatch_write(mr, addr1, val, 2, | |
2937 | attrs); | |
2938 | break; | |
2939 | case 1: | |
2940 | /* 8 bit write access */ | |
2941 | val = ldub_p(buf); | |
2942 | result |= memory_region_dispatch_write(mr, addr1, val, 1, | |
2943 | attrs); | |
2944 | break; | |
2945 | default: | |
2946 | abort(); | |
13eb76e0 FB |
2947 | } |
2948 | } else { | |
eb7eeb88 | 2949 | /* RAM case */ |
04bf2526 | 2950 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l); |
eb7eeb88 PB |
2951 | memcpy(ptr, buf, l); |
2952 | invalidate_and_set_dirty(mr, addr1, l); | |
13eb76e0 | 2953 | } |
4840f10e JK |
2954 | |
2955 | if (release_lock) { | |
2956 | qemu_mutex_unlock_iothread(); | |
2957 | release_lock = false; | |
2958 | } | |
2959 | ||
13eb76e0 FB |
2960 | len -= l; |
2961 | buf += l; | |
2962 | addr += l; | |
a203ac70 PB |
2963 | |
2964 | if (!len) { | |
2965 | break; | |
2966 | } | |
2967 | ||
2968 | l = len; | |
2969 | mr = address_space_translate(as, addr, &addr1, &l, true); | |
13eb76e0 | 2970 | } |
fd8aaa76 | 2971 | |
3b643495 | 2972 | return result; |
13eb76e0 | 2973 | } |
8df1cd07 | 2974 | |
a203ac70 PB |
2975 | MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
2976 | const uint8_t *buf, int len) | |
ac1970fb | 2977 | { |
eb7eeb88 | 2978 | hwaddr l; |
eb7eeb88 PB |
2979 | hwaddr addr1; |
2980 | MemoryRegion *mr; | |
2981 | MemTxResult result = MEMTX_OK; | |
eb7eeb88 | 2982 | |
a203ac70 PB |
2983 | if (len > 0) { |
2984 | rcu_read_lock(); | |
eb7eeb88 | 2985 | l = len; |
a203ac70 PB |
2986 | mr = address_space_translate(as, addr, &addr1, &l, true); |
2987 | result = address_space_write_continue(as, addr, attrs, buf, len, | |
2988 | addr1, l, mr); | |
2989 | rcu_read_unlock(); | |
2990 | } | |
2991 | ||
2992 | return result; | |
2993 | } | |
2994 | ||
2995 | /* Called within RCU critical section. */ | |
2996 | MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr, | |
2997 | MemTxAttrs attrs, uint8_t *buf, | |
2998 | int len, hwaddr addr1, hwaddr l, | |
2999 | MemoryRegion *mr) | |
3000 | { | |
3001 | uint8_t *ptr; | |
3002 | uint64_t val; | |
3003 | MemTxResult result = MEMTX_OK; | |
3004 | bool release_lock = false; | |
eb7eeb88 | 3005 | |
a203ac70 | 3006 | for (;;) { |
eb7eeb88 PB |
3007 | if (!memory_access_is_direct(mr, false)) { |
3008 | /* I/O case */ | |
3009 | release_lock |= prepare_mmio_access(mr); | |
3010 | l = memory_access_size(mr, l, addr1); | |
3011 | switch (l) { | |
3012 | case 8: | |
3013 | /* 64 bit read access */ | |
3014 | result |= memory_region_dispatch_read(mr, addr1, &val, 8, | |
3015 | attrs); | |
3016 | stq_p(buf, val); | |
3017 | break; | |
3018 | case 4: | |
3019 | /* 32 bit read access */ | |
3020 | result |= memory_region_dispatch_read(mr, addr1, &val, 4, | |
3021 | attrs); | |
3022 | stl_p(buf, val); | |
3023 | break; | |
3024 | case 2: | |
3025 | /* 16 bit read access */ | |
3026 | result |= memory_region_dispatch_read(mr, addr1, &val, 2, | |
3027 | attrs); | |
3028 | stw_p(buf, val); | |
3029 | break; | |
3030 | case 1: | |
3031 | /* 8 bit read access */ | |
3032 | result |= memory_region_dispatch_read(mr, addr1, &val, 1, | |
3033 | attrs); | |
3034 | stb_p(buf, val); | |
3035 | break; | |
3036 | default: | |
3037 | abort(); | |
3038 | } | |
3039 | } else { | |
3040 | /* RAM case */ | |
04bf2526 | 3041 | ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l); |
eb7eeb88 PB |
3042 | memcpy(buf, ptr, l); |
3043 | } | |
3044 | ||
3045 | if (release_lock) { | |
3046 | qemu_mutex_unlock_iothread(); | |
3047 | release_lock = false; | |
3048 | } | |
3049 | ||
3050 | len -= l; | |
3051 | buf += l; | |
3052 | addr += l; | |
a203ac70 PB |
3053 | |
3054 | if (!len) { | |
3055 | break; | |
3056 | } | |
3057 | ||
3058 | l = len; | |
3059 | mr = address_space_translate(as, addr, &addr1, &l, false); | |
3060 | } | |
3061 | ||
3062 | return result; | |
3063 | } | |
3064 | ||
3cc8f884 PB |
3065 | MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, |
3066 | MemTxAttrs attrs, uint8_t *buf, int len) | |
a203ac70 PB |
3067 | { |
3068 | hwaddr l; | |
3069 | hwaddr addr1; | |
3070 | MemoryRegion *mr; | |
3071 | MemTxResult result = MEMTX_OK; | |
3072 | ||
3073 | if (len > 0) { | |
3074 | rcu_read_lock(); | |
3075 | l = len; | |
3076 | mr = address_space_translate(as, addr, &addr1, &l, false); | |
3077 | result = address_space_read_continue(as, addr, attrs, buf, len, | |
3078 | addr1, l, mr); | |
3079 | rcu_read_unlock(); | |
eb7eeb88 | 3080 | } |
eb7eeb88 PB |
3081 | |
3082 | return result; | |
ac1970fb AK |
3083 | } |
3084 | ||
eb7eeb88 PB |
3085 | MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, |
3086 | uint8_t *buf, int len, bool is_write) | |
3087 | { | |
3088 | if (is_write) { | |
3089 | return address_space_write(as, addr, attrs, (uint8_t *)buf, len); | |
3090 | } else { | |
3091 | return address_space_read(as, addr, attrs, (uint8_t *)buf, len); | |
3092 | } | |
3093 | } | |
ac1970fb | 3094 | |
a8170e5e | 3095 | void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf, |
ac1970fb AK |
3096 | int len, int is_write) |
3097 | { | |
5c9eb028 PM |
3098 | address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, |
3099 | buf, len, is_write); | |
ac1970fb AK |
3100 | } |
3101 | ||
582b55a9 AG |
3102 | enum write_rom_type { |
3103 | WRITE_DATA, | |
3104 | FLUSH_CACHE, | |
3105 | }; | |
3106 | ||
2a221651 | 3107 | static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as, |
582b55a9 | 3108 | hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type) |
d0ecd2aa | 3109 | { |
149f54b5 | 3110 | hwaddr l; |
d0ecd2aa | 3111 | uint8_t *ptr; |
149f54b5 | 3112 | hwaddr addr1; |
5c8a00ce | 3113 | MemoryRegion *mr; |
3b46e624 | 3114 | |
41063e1e | 3115 | rcu_read_lock(); |
d0ecd2aa | 3116 | while (len > 0) { |
149f54b5 | 3117 | l = len; |
2a221651 | 3118 | mr = address_space_translate(as, addr, &addr1, &l, true); |
3b46e624 | 3119 | |
5c8a00ce PB |
3120 | if (!(memory_region_is_ram(mr) || |
3121 | memory_region_is_romd(mr))) { | |
b242e0e0 | 3122 | l = memory_access_size(mr, l, addr1); |
d0ecd2aa | 3123 | } else { |
d0ecd2aa | 3124 | /* ROM/RAM case */ |
0878d0e1 | 3125 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
582b55a9 AG |
3126 | switch (type) { |
3127 | case WRITE_DATA: | |
3128 | memcpy(ptr, buf, l); | |
845b6214 | 3129 | invalidate_and_set_dirty(mr, addr1, l); |
582b55a9 AG |
3130 | break; |
3131 | case FLUSH_CACHE: | |
3132 | flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l); | |
3133 | break; | |
3134 | } | |
d0ecd2aa FB |
3135 | } |
3136 | len -= l; | |
3137 | buf += l; | |
3138 | addr += l; | |
3139 | } | |
41063e1e | 3140 | rcu_read_unlock(); |
d0ecd2aa FB |
3141 | } |
3142 | ||
582b55a9 | 3143 | /* used for ROM loading : can write in RAM and ROM */ |
2a221651 | 3144 | void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr, |
582b55a9 AG |
3145 | const uint8_t *buf, int len) |
3146 | { | |
2a221651 | 3147 | cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA); |
582b55a9 AG |
3148 | } |
3149 | ||
3150 | void cpu_flush_icache_range(hwaddr start, int len) | |
3151 | { | |
3152 | /* | |
3153 | * This function should do the same thing as an icache flush that was | |
3154 | * triggered from within the guest. For TCG we are always cache coherent, | |
3155 | * so there is no need to flush anything. For KVM / Xen we need to flush | |
3156 | * the host's instruction cache at least. | |
3157 | */ | |
3158 | if (tcg_enabled()) { | |
3159 | return; | |
3160 | } | |
3161 | ||
2a221651 EI |
3162 | cpu_physical_memory_write_rom_internal(&address_space_memory, |
3163 | start, NULL, len, FLUSH_CACHE); | |
582b55a9 AG |
3164 | } |
3165 | ||
6d16c2f8 | 3166 | typedef struct { |
d3e71559 | 3167 | MemoryRegion *mr; |
6d16c2f8 | 3168 | void *buffer; |
a8170e5e AK |
3169 | hwaddr addr; |
3170 | hwaddr len; | |
c2cba0ff | 3171 | bool in_use; |
6d16c2f8 AL |
3172 | } BounceBuffer; |
3173 | ||
3174 | static BounceBuffer bounce; | |
3175 | ||
ba223c29 | 3176 | typedef struct MapClient { |
e95205e1 | 3177 | QEMUBH *bh; |
72cf2d4f | 3178 | QLIST_ENTRY(MapClient) link; |
ba223c29 AL |
3179 | } MapClient; |
3180 | ||
38e047b5 | 3181 | QemuMutex map_client_list_lock; |
72cf2d4f BS |
3182 | static QLIST_HEAD(map_client_list, MapClient) map_client_list |
3183 | = QLIST_HEAD_INITIALIZER(map_client_list); | |
ba223c29 | 3184 | |
e95205e1 FZ |
3185 | static void cpu_unregister_map_client_do(MapClient *client) |
3186 | { | |
3187 | QLIST_REMOVE(client, link); | |
3188 | g_free(client); | |
3189 | } | |
3190 | ||
33b6c2ed FZ |
3191 | static void cpu_notify_map_clients_locked(void) |
3192 | { | |
3193 | MapClient *client; | |
3194 | ||
3195 | while (!QLIST_EMPTY(&map_client_list)) { | |
3196 | client = QLIST_FIRST(&map_client_list); | |
e95205e1 FZ |
3197 | qemu_bh_schedule(client->bh); |
3198 | cpu_unregister_map_client_do(client); | |
33b6c2ed FZ |
3199 | } |
3200 | } | |
3201 | ||
e95205e1 | 3202 | void cpu_register_map_client(QEMUBH *bh) |
ba223c29 | 3203 | { |
7267c094 | 3204 | MapClient *client = g_malloc(sizeof(*client)); |
ba223c29 | 3205 | |
38e047b5 | 3206 | qemu_mutex_lock(&map_client_list_lock); |
e95205e1 | 3207 | client->bh = bh; |
72cf2d4f | 3208 | QLIST_INSERT_HEAD(&map_client_list, client, link); |
33b6c2ed FZ |
3209 | if (!atomic_read(&bounce.in_use)) { |
3210 | cpu_notify_map_clients_locked(); | |
3211 | } | |
38e047b5 | 3212 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3213 | } |
3214 | ||
38e047b5 | 3215 | void cpu_exec_init_all(void) |
ba223c29 | 3216 | { |
38e047b5 | 3217 | qemu_mutex_init(&ram_list.mutex); |
20bccb82 PM |
3218 | /* The data structures we set up here depend on knowing the page size, |
3219 | * so no more changes can be made after this point. | |
3220 | * In an ideal world, nothing we did before we had finished the | |
3221 | * machine setup would care about the target page size, and we could | |
3222 | * do this much later, rather than requiring board models to state | |
3223 | * up front what their requirements are. | |
3224 | */ | |
3225 | finalize_target_page_bits(); | |
38e047b5 | 3226 | io_mem_init(); |
680a4783 | 3227 | memory_map_init(); |
38e047b5 | 3228 | qemu_mutex_init(&map_client_list_lock); |
ba223c29 AL |
3229 | } |
3230 | ||
e95205e1 | 3231 | void cpu_unregister_map_client(QEMUBH *bh) |
ba223c29 AL |
3232 | { |
3233 | MapClient *client; | |
3234 | ||
e95205e1 FZ |
3235 | qemu_mutex_lock(&map_client_list_lock); |
3236 | QLIST_FOREACH(client, &map_client_list, link) { | |
3237 | if (client->bh == bh) { | |
3238 | cpu_unregister_map_client_do(client); | |
3239 | break; | |
3240 | } | |
ba223c29 | 3241 | } |
e95205e1 | 3242 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3243 | } |
3244 | ||
3245 | static void cpu_notify_map_clients(void) | |
3246 | { | |
38e047b5 | 3247 | qemu_mutex_lock(&map_client_list_lock); |
33b6c2ed | 3248 | cpu_notify_map_clients_locked(); |
38e047b5 | 3249 | qemu_mutex_unlock(&map_client_list_lock); |
ba223c29 AL |
3250 | } |
3251 | ||
51644ab7 PB |
3252 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write) |
3253 | { | |
5c8a00ce | 3254 | MemoryRegion *mr; |
51644ab7 PB |
3255 | hwaddr l, xlat; |
3256 | ||
41063e1e | 3257 | rcu_read_lock(); |
51644ab7 PB |
3258 | while (len > 0) { |
3259 | l = len; | |
5c8a00ce PB |
3260 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
3261 | if (!memory_access_is_direct(mr, is_write)) { | |
3262 | l = memory_access_size(mr, l, addr); | |
3263 | if (!memory_region_access_valid(mr, xlat, l, is_write)) { | |
5ad4a2b7 | 3264 | rcu_read_unlock(); |
51644ab7 PB |
3265 | return false; |
3266 | } | |
3267 | } | |
3268 | ||
3269 | len -= l; | |
3270 | addr += l; | |
3271 | } | |
41063e1e | 3272 | rcu_read_unlock(); |
51644ab7 PB |
3273 | return true; |
3274 | } | |
3275 | ||
715c31ec PB |
3276 | static hwaddr |
3277 | address_space_extend_translation(AddressSpace *as, hwaddr addr, hwaddr target_len, | |
3278 | MemoryRegion *mr, hwaddr base, hwaddr len, | |
3279 | bool is_write) | |
3280 | { | |
3281 | hwaddr done = 0; | |
3282 | hwaddr xlat; | |
3283 | MemoryRegion *this_mr; | |
3284 | ||
3285 | for (;;) { | |
3286 | target_len -= len; | |
3287 | addr += len; | |
3288 | done += len; | |
3289 | if (target_len == 0) { | |
3290 | return done; | |
3291 | } | |
3292 | ||
3293 | len = target_len; | |
3294 | this_mr = address_space_translate(as, addr, &xlat, &len, is_write); | |
3295 | if (this_mr != mr || xlat != base + done) { | |
3296 | return done; | |
3297 | } | |
3298 | } | |
3299 | } | |
3300 | ||
6d16c2f8 AL |
3301 | /* Map a physical memory region into a host virtual address. |
3302 | * May map a subset of the requested range, given by and returned in *plen. | |
3303 | * May return NULL if resources needed to perform the mapping are exhausted. | |
3304 | * Use only for reads OR writes - not for read-modify-write operations. | |
ba223c29 AL |
3305 | * Use cpu_register_map_client() to know when retrying the map operation is |
3306 | * likely to succeed. | |
6d16c2f8 | 3307 | */ |
ac1970fb | 3308 | void *address_space_map(AddressSpace *as, |
a8170e5e AK |
3309 | hwaddr addr, |
3310 | hwaddr *plen, | |
ac1970fb | 3311 | bool is_write) |
6d16c2f8 | 3312 | { |
a8170e5e | 3313 | hwaddr len = *plen; |
715c31ec PB |
3314 | hwaddr l, xlat; |
3315 | MemoryRegion *mr; | |
e81bcda5 | 3316 | void *ptr; |
6d16c2f8 | 3317 | |
e3127ae0 PB |
3318 | if (len == 0) { |
3319 | return NULL; | |
3320 | } | |
38bee5dc | 3321 | |
e3127ae0 | 3322 | l = len; |
41063e1e | 3323 | rcu_read_lock(); |
e3127ae0 | 3324 | mr = address_space_translate(as, addr, &xlat, &l, is_write); |
41063e1e | 3325 | |
e3127ae0 | 3326 | if (!memory_access_is_direct(mr, is_write)) { |
c2cba0ff | 3327 | if (atomic_xchg(&bounce.in_use, true)) { |
41063e1e | 3328 | rcu_read_unlock(); |
e3127ae0 | 3329 | return NULL; |
6d16c2f8 | 3330 | } |
e85d9db5 KW |
3331 | /* Avoid unbounded allocations */ |
3332 | l = MIN(l, TARGET_PAGE_SIZE); | |
3333 | bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); | |
e3127ae0 PB |
3334 | bounce.addr = addr; |
3335 | bounce.len = l; | |
d3e71559 PB |
3336 | |
3337 | memory_region_ref(mr); | |
3338 | bounce.mr = mr; | |
e3127ae0 | 3339 | if (!is_write) { |
5c9eb028 PM |
3340 | address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, |
3341 | bounce.buffer, l); | |
8ab934f9 | 3342 | } |
6d16c2f8 | 3343 | |
41063e1e | 3344 | rcu_read_unlock(); |
e3127ae0 PB |
3345 | *plen = l; |
3346 | return bounce.buffer; | |
3347 | } | |
3348 | ||
e3127ae0 | 3349 | |
d3e71559 | 3350 | memory_region_ref(mr); |
715c31ec PB |
3351 | *plen = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write); |
3352 | ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen); | |
e81bcda5 PB |
3353 | rcu_read_unlock(); |
3354 | ||
3355 | return ptr; | |
6d16c2f8 AL |
3356 | } |
3357 | ||
ac1970fb | 3358 | /* Unmaps a memory region previously mapped by address_space_map(). |
6d16c2f8 AL |
3359 | * Will also mark the memory as dirty if is_write == 1. access_len gives |
3360 | * the amount of memory that was actually read or written by the caller. | |
3361 | */ | |
a8170e5e AK |
3362 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
3363 | int is_write, hwaddr access_len) | |
6d16c2f8 AL |
3364 | { |
3365 | if (buffer != bounce.buffer) { | |
d3e71559 PB |
3366 | MemoryRegion *mr; |
3367 | ram_addr_t addr1; | |
3368 | ||
07bdaa41 | 3369 | mr = memory_region_from_host(buffer, &addr1); |
d3e71559 | 3370 | assert(mr != NULL); |
6d16c2f8 | 3371 | if (is_write) { |
845b6214 | 3372 | invalidate_and_set_dirty(mr, addr1, access_len); |
6d16c2f8 | 3373 | } |
868bb33f | 3374 | if (xen_enabled()) { |
e41d7c69 | 3375 | xen_invalidate_map_cache_entry(buffer); |
050a0ddf | 3376 | } |
d3e71559 | 3377 | memory_region_unref(mr); |
6d16c2f8 AL |
3378 | return; |
3379 | } | |
3380 | if (is_write) { | |
5c9eb028 PM |
3381 | address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, |
3382 | bounce.buffer, access_len); | |
6d16c2f8 | 3383 | } |
f8a83245 | 3384 | qemu_vfree(bounce.buffer); |
6d16c2f8 | 3385 | bounce.buffer = NULL; |
d3e71559 | 3386 | memory_region_unref(bounce.mr); |
c2cba0ff | 3387 | atomic_mb_set(&bounce.in_use, false); |
ba223c29 | 3388 | cpu_notify_map_clients(); |
6d16c2f8 | 3389 | } |
d0ecd2aa | 3390 | |
a8170e5e AK |
3391 | void *cpu_physical_memory_map(hwaddr addr, |
3392 | hwaddr *plen, | |
ac1970fb AK |
3393 | int is_write) |
3394 | { | |
3395 | return address_space_map(&address_space_memory, addr, plen, is_write); | |
3396 | } | |
3397 | ||
a8170e5e AK |
3398 | void cpu_physical_memory_unmap(void *buffer, hwaddr len, |
3399 | int is_write, hwaddr access_len) | |
ac1970fb AK |
3400 | { |
3401 | return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len); | |
3402 | } | |
3403 | ||
0ce265ff PB |
3404 | #define ARG1_DECL AddressSpace *as |
3405 | #define ARG1 as | |
3406 | #define SUFFIX | |
3407 | #define TRANSLATE(...) address_space_translate(as, __VA_ARGS__) | |
3408 | #define IS_DIRECT(mr, is_write) memory_access_is_direct(mr, is_write) | |
3409 | #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs) | |
3410 | #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len) | |
3411 | #define RCU_READ_LOCK(...) rcu_read_lock() | |
3412 | #define RCU_READ_UNLOCK(...) rcu_read_unlock() | |
3413 | #include "memory_ldst.inc.c" | |
1e78bcc1 | 3414 | |
1f4e496e PB |
3415 | int64_t address_space_cache_init(MemoryRegionCache *cache, |
3416 | AddressSpace *as, | |
3417 | hwaddr addr, | |
3418 | hwaddr len, | |
3419 | bool is_write) | |
3420 | { | |
90c4fe5f PB |
3421 | cache->len = len; |
3422 | cache->as = as; | |
3423 | cache->xlat = addr; | |
3424 | return len; | |
1f4e496e PB |
3425 | } |
3426 | ||
3427 | void address_space_cache_invalidate(MemoryRegionCache *cache, | |
3428 | hwaddr addr, | |
3429 | hwaddr access_len) | |
3430 | { | |
1f4e496e PB |
3431 | } |
3432 | ||
3433 | void address_space_cache_destroy(MemoryRegionCache *cache) | |
3434 | { | |
90c4fe5f | 3435 | cache->as = NULL; |
1f4e496e PB |
3436 | } |
3437 | ||
3438 | #define ARG1_DECL MemoryRegionCache *cache | |
3439 | #define ARG1 cache | |
3440 | #define SUFFIX _cached | |
90c4fe5f PB |
3441 | #define TRANSLATE(addr, ...) \ |
3442 | address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__) | |
1f4e496e | 3443 | #define IS_DIRECT(mr, is_write) true |
90c4fe5f PB |
3444 | #define MAP_RAM(mr, ofs) qemu_map_ram_ptr((mr)->ram_block, ofs) |
3445 | #define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len) | |
3446 | #define RCU_READ_LOCK() rcu_read_lock() | |
3447 | #define RCU_READ_UNLOCK() rcu_read_unlock() | |
1f4e496e PB |
3448 | #include "memory_ldst.inc.c" |
3449 | ||
5e2972fd | 3450 | /* virtual memory access for debug (includes writing to ROM) */ |
f17ec444 | 3451 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
b448f2f3 | 3452 | uint8_t *buf, int len, int is_write) |
13eb76e0 FB |
3453 | { |
3454 | int l; | |
a8170e5e | 3455 | hwaddr phys_addr; |
9b3c35e0 | 3456 | target_ulong page; |
13eb76e0 | 3457 | |
79ca7a1b | 3458 | cpu_synchronize_state(cpu); |
13eb76e0 | 3459 | while (len > 0) { |
5232e4c7 PM |
3460 | int asidx; |
3461 | MemTxAttrs attrs; | |
3462 | ||
13eb76e0 | 3463 | page = addr & TARGET_PAGE_MASK; |
5232e4c7 PM |
3464 | phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs); |
3465 | asidx = cpu_asidx_from_attrs(cpu, attrs); | |
13eb76e0 FB |
3466 | /* if no physical page mapped, return an error */ |
3467 | if (phys_addr == -1) | |
3468 | return -1; | |
3469 | l = (page + TARGET_PAGE_SIZE) - addr; | |
3470 | if (l > len) | |
3471 | l = len; | |
5e2972fd | 3472 | phys_addr += (addr & ~TARGET_PAGE_MASK); |
2e38847b | 3473 | if (is_write) { |
5232e4c7 PM |
3474 | cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as, |
3475 | phys_addr, buf, l); | |
2e38847b | 3476 | } else { |
5232e4c7 PM |
3477 | address_space_rw(cpu->cpu_ases[asidx].as, phys_addr, |
3478 | MEMTXATTRS_UNSPECIFIED, | |
5c9eb028 | 3479 | buf, l, 0); |
2e38847b | 3480 | } |
13eb76e0 FB |
3481 | len -= l; |
3482 | buf += l; | |
3483 | addr += l; | |
3484 | } | |
3485 | return 0; | |
3486 | } | |
038629a6 DDAG |
3487 | |
3488 | /* | |
3489 | * Allows code that needs to deal with migration bitmaps etc to still be built | |
3490 | * target independent. | |
3491 | */ | |
20afaed9 | 3492 | size_t qemu_target_page_size(void) |
038629a6 | 3493 | { |
20afaed9 | 3494 | return TARGET_PAGE_SIZE; |
038629a6 DDAG |
3495 | } |
3496 | ||
46d702b1 JQ |
3497 | int qemu_target_page_bits(void) |
3498 | { | |
3499 | return TARGET_PAGE_BITS; | |
3500 | } | |
3501 | ||
3502 | int qemu_target_page_bits_min(void) | |
3503 | { | |
3504 | return TARGET_PAGE_BITS_MIN; | |
3505 | } | |
a68fe89c | 3506 | #endif |
13eb76e0 | 3507 | |
8e4a424b BS |
3508 | /* |
3509 | * A helper function for the _utterly broken_ virtio device model to find out if | |
3510 | * it's running on a big endian machine. Don't do this at home kids! | |
3511 | */ | |
98ed8ecf GK |
3512 | bool target_words_bigendian(void); |
3513 | bool target_words_bigendian(void) | |
8e4a424b BS |
3514 | { |
3515 | #if defined(TARGET_WORDS_BIGENDIAN) | |
3516 | return true; | |
3517 | #else | |
3518 | return false; | |
3519 | #endif | |
3520 | } | |
3521 | ||
76f35538 | 3522 | #ifndef CONFIG_USER_ONLY |
a8170e5e | 3523 | bool cpu_physical_memory_is_io(hwaddr phys_addr) |
76f35538 | 3524 | { |
5c8a00ce | 3525 | MemoryRegion*mr; |
149f54b5 | 3526 | hwaddr l = 1; |
41063e1e | 3527 | bool res; |
76f35538 | 3528 | |
41063e1e | 3529 | rcu_read_lock(); |
5c8a00ce PB |
3530 | mr = address_space_translate(&address_space_memory, |
3531 | phys_addr, &phys_addr, &l, false); | |
76f35538 | 3532 | |
41063e1e PB |
3533 | res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); |
3534 | rcu_read_unlock(); | |
3535 | return res; | |
76f35538 | 3536 | } |
bd2fa51f | 3537 | |
e3807054 | 3538 | int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) |
bd2fa51f MH |
3539 | { |
3540 | RAMBlock *block; | |
e3807054 | 3541 | int ret = 0; |
bd2fa51f | 3542 | |
0dc3f44a | 3543 | rcu_read_lock(); |
99e15582 | 3544 | RAMBLOCK_FOREACH(block) { |
e3807054 DDAG |
3545 | ret = func(block->idstr, block->host, block->offset, |
3546 | block->used_length, opaque); | |
3547 | if (ret) { | |
3548 | break; | |
3549 | } | |
bd2fa51f | 3550 | } |
0dc3f44a | 3551 | rcu_read_unlock(); |
e3807054 | 3552 | return ret; |
bd2fa51f | 3553 | } |
d3a5038c DDAG |
3554 | |
3555 | /* | |
3556 | * Unmap pages of memory from start to start+length such that | |
3557 | * they a) read as 0, b) Trigger whatever fault mechanism | |
3558 | * the OS provides for postcopy. | |
3559 | * The pages must be unmapped by the end of the function. | |
3560 | * Returns: 0 on success, none-0 on failure | |
3561 | * | |
3562 | */ | |
3563 | int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) | |
3564 | { | |
3565 | int ret = -1; | |
3566 | ||
3567 | uint8_t *host_startaddr = rb->host + start; | |
3568 | ||
3569 | if ((uintptr_t)host_startaddr & (rb->page_size - 1)) { | |
3570 | error_report("ram_block_discard_range: Unaligned start address: %p", | |
3571 | host_startaddr); | |
3572 | goto err; | |
3573 | } | |
3574 | ||
3575 | if ((start + length) <= rb->used_length) { | |
3576 | uint8_t *host_endaddr = host_startaddr + length; | |
3577 | if ((uintptr_t)host_endaddr & (rb->page_size - 1)) { | |
3578 | error_report("ram_block_discard_range: Unaligned end address: %p", | |
3579 | host_endaddr); | |
3580 | goto err; | |
3581 | } | |
3582 | ||
3583 | errno = ENOTSUP; /* If we are missing MADVISE etc */ | |
3584 | ||
e2fa71f5 | 3585 | if (rb->page_size == qemu_host_page_size) { |
d3a5038c | 3586 | #if defined(CONFIG_MADVISE) |
e2fa71f5 DDAG |
3587 | /* Note: We need the madvise MADV_DONTNEED behaviour of definitely |
3588 | * freeing the page. | |
3589 | */ | |
3590 | ret = madvise(host_startaddr, length, MADV_DONTNEED); | |
d3a5038c | 3591 | #endif |
e2fa71f5 DDAG |
3592 | } else { |
3593 | /* Huge page case - unfortunately it can't do DONTNEED, but | |
3594 | * it can do the equivalent by FALLOC_FL_PUNCH_HOLE in the | |
3595 | * huge page file. | |
3596 | */ | |
3597 | #ifdef CONFIG_FALLOCATE_PUNCH_HOLE | |
3598 | ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, | |
3599 | start, length); | |
3600 | #endif | |
3601 | } | |
d3a5038c DDAG |
3602 | if (ret) { |
3603 | ret = -errno; | |
3604 | error_report("ram_block_discard_range: Failed to discard range " | |
3605 | "%s:%" PRIx64 " +%zx (%d)", | |
3606 | rb->idstr, start, length, ret); | |
3607 | } | |
3608 | } else { | |
3609 | error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64 | |
3610 | "/%zx/" RAM_ADDR_FMT")", | |
3611 | rb->idstr, start, length, rb->used_length); | |
3612 | } | |
3613 | ||
3614 | err: | |
3615 | return ret; | |
3616 | } | |
3617 | ||
ec3f8c99 | 3618 | #endif |
a0be0c58 YZ |
3619 | |
3620 | void page_size_init(void) | |
3621 | { | |
3622 | /* NOTE: we can always suppose that qemu_host_page_size >= | |
3623 | TARGET_PAGE_SIZE */ | |
3624 | qemu_real_host_page_size = getpagesize(); | |
3625 | qemu_real_host_page_mask = -(intptr_t)qemu_real_host_page_size; | |
3626 | if (qemu_host_page_size == 0) { | |
3627 | qemu_host_page_size = qemu_real_host_page_size; | |
3628 | } | |
3629 | if (qemu_host_page_size < TARGET_PAGE_SIZE) { | |
3630 | qemu_host_page_size = TARGET_PAGE_SIZE; | |
3631 | } | |
3632 | qemu_host_page_mask = -(intptr_t)qemu_host_page_size; | |
3633 | } |