1 | /* | |
2 | * Physical memory management | |
3 | * | |
4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates | |
5 | * | |
6 | * Authors: | |
7 | * Avi Kivity <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | * Contributions after 2012-01-13 are licensed under the terms of the | |
13 | * GNU GPL, version 2 or (at your option) any later version. | |
14 | */ | |
15 | ||
16 | #include "exec/memory.h" | |
17 | #include "exec/address-spaces.h" | |
18 | #include "exec/ioport.h" | |
19 | #include "qemu/bitops.h" | |
20 | #include "qom/object.h" | |
21 | #include "trace.h" | |
22 | #include <assert.h> | |
23 | ||
24 | #include "exec/memory-internal.h" | |
25 | ||
26 | //#define DEBUG_UNASSIGNED | |
27 | ||
28 | static unsigned memory_region_transaction_depth; | |
29 | static bool memory_region_update_pending; | |
30 | static bool global_dirty_log = false; | |
31 | ||
32 | /* flat_view_mutex is taken around reading as->current_map; the critical | |
33 | * section is extremely short, so one mutex is shared by all address spaces. | |
34 | * We could also use RCU for the read side. | |
35 | * | |
36 | * The BQL is taken around transaction commits, hence both locks are taken | |
37 | * while writing to as->current_map (with the BQL taken outside). | |
38 | */ | |
39 | static QemuMutex flat_view_mutex; | |
40 | ||
41 | static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners | |
42 | = QTAILQ_HEAD_INITIALIZER(memory_listeners); | |
43 | ||
44 | static QTAILQ_HEAD(, AddressSpace) address_spaces | |
45 | = QTAILQ_HEAD_INITIALIZER(address_spaces); | |
46 | ||
47 | static void memory_init(void) | |
48 | { | |
49 | qemu_mutex_init(&flat_view_mutex); | |
50 | } | |
51 | ||
52 | typedef struct AddrRange AddrRange; | |
53 | ||
54 | /* | |
55 | * Note that using signed integers limits us to physical addresses at most | |
56 | * 63 bits wide. They are needed for negative offsetting in aliases | |
57 | * (large MemoryRegion::alias_offset). | |
58 | */ | |
59 | struct AddrRange { | |
60 | Int128 start; | |
61 | Int128 size; | |
62 | }; | |
63 | ||
64 | static AddrRange addrrange_make(Int128 start, Int128 size) | |
65 | { | |
66 | return (AddrRange) { start, size }; | |
67 | } | |
68 | ||
69 | static bool addrrange_equal(AddrRange r1, AddrRange r2) | |
70 | { | |
71 | return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); | |
72 | } | |
73 | ||
74 | static Int128 addrrange_end(AddrRange r) | |
75 | { | |
76 | return int128_add(r.start, r.size); | |
77 | } | |
78 | ||
79 | static AddrRange addrrange_shift(AddrRange range, Int128 delta) | |
80 | { | |
81 | int128_addto(&range.start, delta); | |
82 | return range; | |
83 | } | |
84 | ||
85 | static bool addrrange_contains(AddrRange range, Int128 addr) | |
86 | { | |
87 | return int128_ge(addr, range.start) | |
88 | && int128_lt(addr, addrrange_end(range)); | |
89 | } | |
90 | ||
91 | static bool addrrange_intersects(AddrRange r1, AddrRange r2) | |
92 | { | |
93 | return addrrange_contains(r1, r2.start) | |
94 | || addrrange_contains(r2, r1.start); | |
95 | } | |
96 | ||
97 | static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) | |
98 | { | |
99 | Int128 start = int128_max(r1.start, r2.start); | |
100 | Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); | |
101 | return addrrange_make(start, int128_sub(end, start)); | |
102 | } | |
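For illustration: an AddrRange is half-open, covering [start, start + size). Two ranges such as start 0x1000/size 0x2000 and start 0x2000/size 0x2000 intersect, and addrrange_intersection() yields start 0x2000, size 0x1000; merely adjacent ranges (one ending exactly where the other begins) do not intersect, which is why can_merge() further down tests adjacency separately.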
103 | ||
104 | enum ListenerDirection { Forward, Reverse }; | |
105 | ||
106 | static bool memory_listener_match(MemoryListener *listener, | |
107 | MemoryRegionSection *section) | |
108 | { | |
109 | return !listener->address_space_filter | |
110 | || listener->address_space_filter == section->address_space; | |
111 | } | |
112 | ||
113 | #define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \ | |
114 | do { \ | |
115 | MemoryListener *_listener; \ | |
116 | \ | |
117 | switch (_direction) { \ | |
118 | case Forward: \ | |
119 | QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ | |
120 | if (_listener->_callback) { \ | |
121 | _listener->_callback(_listener, ##_args); \ | |
122 | } \ | |
123 | } \ | |
124 | break; \ | |
125 | case Reverse: \ | |
126 | QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \ | |
127 | memory_listeners, link) { \ | |
128 | if (_listener->_callback) { \ | |
129 | _listener->_callback(_listener, ##_args); \ | |
130 | } \ | |
131 | } \ | |
132 | break; \ | |
133 | default: \ | |
134 | abort(); \ | |
135 | } \ | |
136 | } while (0) | |
137 | ||
138 | #define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \ | |
139 | do { \ | |
140 | MemoryListener *_listener; \ | |
141 | \ | |
142 | switch (_direction) { \ | |
143 | case Forward: \ | |
144 | QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ | |
145 | if (_listener->_callback \ | |
146 | && memory_listener_match(_listener, _section)) { \ | |
147 | _listener->_callback(_listener, _section, ##_args); \ | |
148 | } \ | |
149 | } \ | |
150 | break; \ | |
151 | case Reverse: \ | |
152 | QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \ | |
153 | memory_listeners, link) { \ | |
154 | if (_listener->_callback \ | |
155 | && memory_listener_match(_listener, _section)) { \ | |
156 | _listener->_callback(_listener, _section, ##_args); \ | |
157 | } \ | |
158 | } \ | |
159 | break; \ | |
160 | default: \ | |
161 | abort(); \ | |
162 | } \ | |
163 | } while (0) | |
164 | ||
165 | /* No need to ref/unref .mr; the FlatRange keeps it alive. */ | |
166 | #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback) \ | |
167 | MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \ | |
168 | .mr = (fr)->mr, \ | |
169 | .address_space = (as), \ | |
170 | .offset_within_region = (fr)->offset_in_region, \ | |
171 | .size = (fr)->addr.size, \ | |
172 | .offset_within_address_space = int128_get64((fr)->addr.start), \ | |
173 | .readonly = (fr)->readonly, \ | |
174 | })) | |
175 | ||
176 | struct CoalescedMemoryRange { | |
177 | AddrRange addr; | |
178 | QTAILQ_ENTRY(CoalescedMemoryRange) link; | |
179 | }; | |
180 | ||
181 | struct MemoryRegionIoeventfd { | |
182 | AddrRange addr; | |
183 | bool match_data; | |
184 | uint64_t data; | |
185 | EventNotifier *e; | |
186 | }; | |
187 | ||
188 | static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a, | |
189 | MemoryRegionIoeventfd b) | |
190 | { | |
191 | if (int128_lt(a.addr.start, b.addr.start)) { | |
192 | return true; | |
193 | } else if (int128_gt(a.addr.start, b.addr.start)) { | |
194 | return false; | |
195 | } else if (int128_lt(a.addr.size, b.addr.size)) { | |
196 | return true; | |
197 | } else if (int128_gt(a.addr.size, b.addr.size)) { | |
198 | return false; | |
199 | } else if (a.match_data < b.match_data) { | |
200 | return true; | |
201 | } else if (a.match_data > b.match_data) { | |
202 | return false; | |
203 | } else if (a.match_data) { | |
204 | if (a.data < b.data) { | |
205 | return true; | |
206 | } else if (a.data > b.data) { | |
207 | return false; | |
208 | } | |
209 | } | |
210 | if (a.e < b.e) { | |
211 | return true; | |
212 | } else if (a.e > b.e) { | |
213 | return false; | |
214 | } | |
215 | return false; | |
216 | } | |
217 | ||
218 | static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a, | |
219 | MemoryRegionIoeventfd b) | |
220 | { | |
221 | return !memory_region_ioeventfd_before(a, b) | |
222 | && !memory_region_ioeventfd_before(b, a); | |
223 | } | |
224 | ||
225 | typedef struct FlatRange FlatRange; | |
226 | typedef struct FlatView FlatView; | |
227 | ||
228 | /* Range of memory in the global map. Addresses are absolute. */ | |
229 | struct FlatRange { | |
230 | MemoryRegion *mr; | |
231 | hwaddr offset_in_region; | |
232 | AddrRange addr; | |
233 | uint8_t dirty_log_mask; | |
234 | bool romd_mode; | |
235 | bool readonly; | |
236 | }; | |
237 | ||
238 | /* Flattened global view of the currently active memory hierarchy. Kept in sorted | |
239 | * order. | |
240 | */ | |
241 | struct FlatView { | |
242 | unsigned ref; | |
243 | FlatRange *ranges; | |
244 | unsigned nr; | |
245 | unsigned nr_allocated; | |
246 | }; | |
247 | ||
248 | typedef struct AddressSpaceOps AddressSpaceOps; | |
249 | ||
250 | #define FOR_EACH_FLAT_RANGE(var, view) \ | |
251 | for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) | |
252 | ||
253 | static bool flatrange_equal(FlatRange *a, FlatRange *b) | |
254 | { | |
255 | return a->mr == b->mr | |
256 | && addrrange_equal(a->addr, b->addr) | |
257 | && a->offset_in_region == b->offset_in_region | |
258 | && a->romd_mode == b->romd_mode | |
259 | && a->readonly == b->readonly; | |
260 | } | |
261 | ||
262 | static void flatview_init(FlatView *view) | |
263 | { | |
264 | view->ref = 1; | |
265 | view->ranges = NULL; | |
266 | view->nr = 0; | |
267 | view->nr_allocated = 0; | |
268 | } | |
269 | ||
270 | /* Insert a range into a given position. Caller is responsible for maintaining | |
271 | * sorting order. | |
272 | */ | |
273 | static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) | |
274 | { | |
275 | if (view->nr == view->nr_allocated) { | |
276 | view->nr_allocated = MAX(2 * view->nr, 10); | |
277 | view->ranges = g_realloc(view->ranges, | |
278 | view->nr_allocated * sizeof(*view->ranges)); | |
279 | } | |
280 | memmove(view->ranges + pos + 1, view->ranges + pos, | |
281 | (view->nr - pos) * sizeof(FlatRange)); | |
282 | view->ranges[pos] = *range; | |
283 | memory_region_ref(range->mr); | |
284 | ++view->nr; | |
285 | } | |
286 | ||
287 | static void flatview_destroy(FlatView *view) | |
288 | { | |
289 | int i; | |
290 | ||
291 | for (i = 0; i < view->nr; i++) { | |
292 | memory_region_unref(view->ranges[i].mr); | |
293 | } | |
294 | g_free(view->ranges); | |
295 | g_free(view); | |
296 | } | |
297 | ||
298 | static void flatview_ref(FlatView *view) | |
299 | { | |
300 | atomic_inc(&view->ref); | |
301 | } | |
302 | ||
303 | static void flatview_unref(FlatView *view) | |
304 | { | |
305 | if (atomic_fetch_dec(&view->ref) == 1) { | |
306 | flatview_destroy(view); | |
307 | } | |
308 | } | |
309 | ||
310 | static bool can_merge(FlatRange *r1, FlatRange *r2) | |
311 | { | |
312 | return int128_eq(addrrange_end(r1->addr), r2->addr.start) | |
313 | && r1->mr == r2->mr | |
314 | && int128_eq(int128_add(int128_make64(r1->offset_in_region), | |
315 | r1->addr.size), | |
316 | int128_make64(r2->offset_in_region)) | |
317 | && r1->dirty_log_mask == r2->dirty_log_mask | |
318 | && r1->romd_mode == r2->romd_mode | |
319 | && r1->readonly == r2->readonly; | |
320 | } | |
321 | ||
322 | /* Attempt to simplify a view by merging adjacent ranges */ | |
323 | static void flatview_simplify(FlatView *view) | |
324 | { | |
325 | unsigned i, j; | |
326 | ||
327 | i = 0; | |
328 | while (i < view->nr) { | |
329 | j = i + 1; | |
330 | while (j < view->nr | |
331 | && can_merge(&view->ranges[j-1], &view->ranges[j])) { | |
332 | int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); | |
333 | ++j; | |
334 | } | |
335 | ++i; | |
336 | memmove(&view->ranges[i], &view->ranges[j], | |
337 | (view->nr - j) * sizeof(view->ranges[j])); | |
338 | view->nr -= j - i; | |
339 | } | |
340 | } | |
341 | ||
342 | static bool memory_region_big_endian(MemoryRegion *mr) | |
343 | { | |
344 | #ifdef TARGET_WORDS_BIGENDIAN | |
345 | return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; | |
346 | #else | |
347 | return mr->ops->endianness == DEVICE_BIG_ENDIAN; | |
348 | #endif | |
349 | } | |
350 | ||
351 | static bool memory_region_wrong_endianness(MemoryRegion *mr) | |
352 | { | |
353 | #ifdef TARGET_WORDS_BIGENDIAN | |
354 | return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; | |
355 | #else | |
356 | return mr->ops->endianness == DEVICE_BIG_ENDIAN; | |
357 | #endif | |
358 | } | |
359 | ||
360 | static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) | |
361 | { | |
362 | if (memory_region_wrong_endianness(mr)) { | |
363 | switch (size) { | |
364 | case 1: | |
365 | break; | |
366 | case 2: | |
367 | *data = bswap16(*data); | |
368 | break; | |
369 | case 4: | |
370 | *data = bswap32(*data); | |
371 | break; | |
372 | case 8: | |
373 | *data = bswap64(*data); | |
374 | break; | |
375 | default: | |
376 | abort(); | |
377 | } | |
378 | } | |
379 | } | |
380 | ||
381 | static void memory_region_oldmmio_read_accessor(MemoryRegion *mr, | |
382 | hwaddr addr, | |
383 | uint64_t *value, | |
384 | unsigned size, | |
385 | unsigned shift, | |
386 | uint64_t mask) | |
387 | { | |
388 | uint64_t tmp; | |
389 | ||
390 | tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); | |
391 | trace_memory_region_ops_read(mr, addr, tmp, size); | |
392 | *value |= (tmp & mask) << shift; | |
393 | } | |
394 | ||
395 | static void memory_region_read_accessor(MemoryRegion *mr, | |
396 | hwaddr addr, | |
397 | uint64_t *value, | |
398 | unsigned size, | |
399 | unsigned shift, | |
400 | uint64_t mask) | |
401 | { | |
402 | uint64_t tmp; | |
403 | ||
404 | if (mr->flush_coalesced_mmio) { | |
405 | qemu_flush_coalesced_mmio_buffer(); | |
406 | } | |
407 | tmp = mr->ops->read(mr->opaque, addr, size); | |
408 | trace_memory_region_ops_read(mr, addr, tmp, size); | |
409 | *value |= (tmp & mask) << shift; | |
410 | } | |
411 | ||
412 | static void memory_region_oldmmio_write_accessor(MemoryRegion *mr, | |
413 | hwaddr addr, | |
414 | uint64_t *value, | |
415 | unsigned size, | |
416 | unsigned shift, | |
417 | uint64_t mask) | |
418 | { | |
419 | uint64_t tmp; | |
420 | ||
421 | tmp = (*value >> shift) & mask; | |
422 | trace_memory_region_ops_write(mr, addr, tmp, size); | |
423 | mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); | |
424 | } | |
425 | ||
426 | static void memory_region_write_accessor(MemoryRegion *mr, | |
427 | hwaddr addr, | |
428 | uint64_t *value, | |
429 | unsigned size, | |
430 | unsigned shift, | |
431 | uint64_t mask) | |
432 | { | |
433 | uint64_t tmp; | |
434 | ||
435 | if (mr->flush_coalesced_mmio) { | |
436 | qemu_flush_coalesced_mmio_buffer(); | |
437 | } | |
438 | tmp = (*value >> shift) & mask; | |
439 | trace_memory_region_ops_write(mr, addr, tmp, size); | |
440 | mr->ops->write(mr->opaque, addr, tmp, size); | |
441 | } | |
442 | ||
443 | static void access_with_adjusted_size(hwaddr addr, | |
444 | uint64_t *value, | |
445 | unsigned size, | |
446 | unsigned access_size_min, | |
447 | unsigned access_size_max, | |
448 | void (*access)(MemoryRegion *mr, | |
449 | hwaddr addr, | |
450 | uint64_t *value, | |
451 | unsigned size, | |
452 | unsigned shift, | |
453 | uint64_t mask), | |
454 | MemoryRegion *mr) | |
455 | { | |
456 | uint64_t access_mask; | |
457 | unsigned access_size; | |
458 | unsigned i; | |
459 | ||
460 | if (!access_size_min) { | |
461 | access_size_min = 1; | |
462 | } | |
463 | if (!access_size_max) { | |
464 | access_size_max = 4; | |
465 | } | |
466 | ||
467 | /* FIXME: support unaligned access? */ | |
468 | access_size = MAX(MIN(size, access_size_max), access_size_min); | |
469 | access_mask = -1ULL >> (64 - access_size * 8); | |
470 | if (memory_region_big_endian(mr)) { | |
471 | for (i = 0; i < size; i += access_size) { | |
472 | access(mr, addr + i, value, access_size, | |
473 | (size - access_size - i) * 8, access_mask); | |
474 | } | |
475 | } else { | |
476 | for (i = 0; i < size; i += access_size) { | |
477 | access(mr, addr + i, value, access_size, i * 8, access_mask); | |
478 | } | |
479 | } | |
480 | } | |
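To make the shift/mask bookkeeping above concrete, here is a small self-contained sketch (not part of this file) that reproduces the arithmetic for a 4-byte read served by a device whose impl.max_access_size is 2. The two half-word values are made up; only the masking and shifting mirrors access_with_adjusted_size().

```c
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    uint64_t halves[2] = { 0x1122, 0x3344 };  /* device results for addr+0, addr+2 */
    unsigned size = 4, access_size = 2, i;
    uint64_t mask = -1ULL >> (64 - access_size * 8);  /* 0xffff */
    uint64_t le = 0, be = 0;

    for (i = 0; i < size; i += access_size) {
        /* little-endian device: low offsets land in the low bits */
        le |= (halves[i / access_size] & mask) << (i * 8);
        /* big-endian device: low offsets land in the high bits */
        be |= (halves[i / access_size] & mask) << ((size - access_size - i) * 8);
    }
    printf("LE assembly: 0x%08" PRIx64 "\n", le);  /* prints 0x33441122 */
    printf("BE assembly: 0x%08" PRIx64 "\n", be);  /* prints 0x11223344 */
    return 0;
}
```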
481 | ||
482 | static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) | |
483 | { | |
484 | AddressSpace *as; | |
485 | ||
486 | while (mr->parent) { | |
487 | mr = mr->parent; | |
488 | } | |
489 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
490 | if (mr == as->root) { | |
491 | return as; | |
492 | } | |
493 | } | |
494 | abort(); | |
495 | } | |
496 | ||
497 | /* Render a memory region into the global view. Ranges in @view obscure | |
498 | * ranges in @mr. | |
499 | */ | |
500 | static void render_memory_region(FlatView *view, | |
501 | MemoryRegion *mr, | |
502 | Int128 base, | |
503 | AddrRange clip, | |
504 | bool readonly) | |
505 | { | |
506 | MemoryRegion *subregion; | |
507 | unsigned i; | |
508 | hwaddr offset_in_region; | |
509 | Int128 remain; | |
510 | Int128 now; | |
511 | FlatRange fr; | |
512 | AddrRange tmp; | |
513 | ||
514 | if (!mr->enabled) { | |
515 | return; | |
516 | } | |
517 | ||
518 | int128_addto(&base, int128_make64(mr->addr)); | |
519 | readonly |= mr->readonly; | |
520 | ||
521 | tmp = addrrange_make(base, mr->size); | |
522 | ||
523 | if (!addrrange_intersects(tmp, clip)) { | |
524 | return; | |
525 | } | |
526 | ||
527 | clip = addrrange_intersection(tmp, clip); | |
528 | ||
529 | if (mr->alias) { | |
530 | int128_subfrom(&base, int128_make64(mr->alias->addr)); | |
531 | int128_subfrom(&base, int128_make64(mr->alias_offset)); | |
532 | render_memory_region(view, mr->alias, base, clip, readonly); | |
533 | return; | |
534 | } | |
535 | ||
536 | /* Render subregions in priority order. */ | |
537 | QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { | |
538 | render_memory_region(view, subregion, base, clip, readonly); | |
539 | } | |
540 | ||
541 | if (!mr->terminates) { | |
542 | return; | |
543 | } | |
544 | ||
545 | offset_in_region = int128_get64(int128_sub(clip.start, base)); | |
546 | base = clip.start; | |
547 | remain = clip.size; | |
548 | ||
549 | fr.mr = mr; | |
550 | fr.dirty_log_mask = mr->dirty_log_mask; | |
551 | fr.romd_mode = mr->romd_mode; | |
552 | fr.readonly = readonly; | |
553 | ||
554 | /* Render the region itself into any gaps left by the current view. */ | |
555 | for (i = 0; i < view->nr && int128_nz(remain); ++i) { | |
556 | if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { | |
557 | continue; | |
558 | } | |
559 | if (int128_lt(base, view->ranges[i].addr.start)) { | |
560 | now = int128_min(remain, | |
561 | int128_sub(view->ranges[i].addr.start, base)); | |
562 | fr.offset_in_region = offset_in_region; | |
563 | fr.addr = addrrange_make(base, now); | |
564 | flatview_insert(view, i, &fr); | |
565 | ++i; | |
566 | int128_addto(&base, now); | |
567 | offset_in_region += int128_get64(now); | |
568 | int128_subfrom(&remain, now); | |
569 | } | |
570 | now = int128_sub(int128_min(int128_add(base, remain), | |
571 | addrrange_end(view->ranges[i].addr)), | |
572 | base); | |
573 | int128_addto(&base, now); | |
574 | offset_in_region += int128_get64(now); | |
575 | int128_subfrom(&remain, now); | |
576 | } | |
577 | if (int128_nz(remain)) { | |
578 | fr.offset_in_region = offset_in_region; | |
579 | fr.addr = addrrange_make(base, remain); | |
580 | flatview_insert(view, i, &fr); | |
581 | } | |
582 | } | |
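A concrete example may help: suppose a non-terminating container holds a terminating 0x10000-byte "ram" subregion at offset 0 with priority 0 and a terminating 0x1000-byte "mmio" subregion at offset 0x4000 with priority 1 (names and sizes invented for illustration). Subregions are visited in descending priority, so "mmio" is rendered first and claims [0x4000, 0x5000); "ram" then fills the remaining gaps, producing three FlatRanges: [0, 0x4000) at offset_in_region 0, the "mmio" range, and [0x5000, 0x10000) at offset_in_region 0x5000.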
583 | ||
584 | /* Render a memory topology into a list of disjoint absolute ranges. */ | |
585 | static FlatView *generate_memory_topology(MemoryRegion *mr) | |
586 | { | |
587 | FlatView *view; | |
588 | ||
589 | view = g_new(FlatView, 1); | |
590 | flatview_init(view); | |
591 | ||
592 | if (mr) { | |
593 | render_memory_region(view, mr, int128_zero(), | |
594 | addrrange_make(int128_zero(), int128_2_64()), false); | |
595 | } | |
596 | flatview_simplify(view); | |
597 | ||
598 | return view; | |
599 | } | |
600 | ||
601 | static void address_space_add_del_ioeventfds(AddressSpace *as, | |
602 | MemoryRegionIoeventfd *fds_new, | |
603 | unsigned fds_new_nb, | |
604 | MemoryRegionIoeventfd *fds_old, | |
605 | unsigned fds_old_nb) | |
606 | { | |
607 | unsigned iold, inew; | |
608 | MemoryRegionIoeventfd *fd; | |
609 | MemoryRegionSection section; | |
610 | ||
611 | /* Generate a symmetric difference of the old and new fd sets, adding | |
612 | * and deleting as necessary. | |
613 | */ | |
614 | ||
615 | iold = inew = 0; | |
616 | while (iold < fds_old_nb || inew < fds_new_nb) { | |
617 | if (iold < fds_old_nb | |
618 | && (inew == fds_new_nb | |
619 | || memory_region_ioeventfd_before(fds_old[iold], | |
620 | fds_new[inew]))) { | |
621 | fd = &fds_old[iold]; | |
622 | section = (MemoryRegionSection) { | |
623 | .address_space = as, | |
624 | .offset_within_address_space = int128_get64(fd->addr.start), | |
625 | .size = fd->addr.size, | |
626 | }; | |
627 | MEMORY_LISTENER_CALL(eventfd_del, Forward, §ion, | |
628 | fd->match_data, fd->data, fd->e); | |
629 | ++iold; | |
630 | } else if (inew < fds_new_nb | |
631 | && (iold == fds_old_nb | |
632 | || memory_region_ioeventfd_before(fds_new[inew], | |
633 | fds_old[iold]))) { | |
634 | fd = &fds_new[inew]; | |
635 | section = (MemoryRegionSection) { | |
636 | .address_space = as, | |
637 | .offset_within_address_space = int128_get64(fd->addr.start), | |
638 | .size = fd->addr.size, | |
639 | }; | |
640 | MEMORY_LISTENER_CALL(eventfd_add, Reverse, §ion, | |
641 | fd->match_data, fd->data, fd->e); | |
642 | ++inew; | |
643 | } else { | |
644 | ++iold; | |
645 | ++inew; | |
646 | } | |
647 | } | |
648 | } | |
649 | ||
650 | static FlatView *address_space_get_flatview(AddressSpace *as) | |
651 | { | |
652 | FlatView *view; | |
653 | ||
654 | qemu_mutex_lock(&flat_view_mutex); | |
655 | view = as->current_map; | |
656 | flatview_ref(view); | |
657 | qemu_mutex_unlock(&flat_view_mutex); | |
658 | return view; | |
659 | } | |
660 | ||
661 | static void address_space_update_ioeventfds(AddressSpace *as) | |
662 | { | |
663 | FlatView *view; | |
664 | FlatRange *fr; | |
665 | unsigned ioeventfd_nb = 0; | |
666 | MemoryRegionIoeventfd *ioeventfds = NULL; | |
667 | AddrRange tmp; | |
668 | unsigned i; | |
669 | ||
670 | view = address_space_get_flatview(as); | |
671 | FOR_EACH_FLAT_RANGE(fr, view) { | |
672 | for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { | |
673 | tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, | |
674 | int128_sub(fr->addr.start, | |
675 | int128_make64(fr->offset_in_region))); | |
676 | if (addrrange_intersects(fr->addr, tmp)) { | |
677 | ++ioeventfd_nb; | |
678 | ioeventfds = g_realloc(ioeventfds, | |
679 | ioeventfd_nb * sizeof(*ioeventfds)); | |
680 | ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; | |
681 | ioeventfds[ioeventfd_nb-1].addr = tmp; | |
682 | } | |
683 | } | |
684 | } | |
685 | ||
686 | address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, | |
687 | as->ioeventfds, as->ioeventfd_nb); | |
688 | ||
689 | g_free(as->ioeventfds); | |
690 | as->ioeventfds = ioeventfds; | |
691 | as->ioeventfd_nb = ioeventfd_nb; | |
692 | flatview_unref(view); | |
693 | } | |
694 | ||
695 | static void address_space_update_topology_pass(AddressSpace *as, | |
696 | const FlatView *old_view, | |
697 | const FlatView *new_view, | |
698 | bool adding) | |
699 | { | |
700 | unsigned iold, inew; | |
701 | FlatRange *frold, *frnew; | |
702 | ||
703 | /* Generate a symmetric difference of the old and new memory maps. | |
704 | * Kill ranges in the old map, and instantiate ranges in the new map. | |
705 | */ | |
706 | iold = inew = 0; | |
707 | while (iold < old_view->nr || inew < new_view->nr) { | |
708 | if (iold < old_view->nr) { | |
709 | frold = &old_view->ranges[iold]; | |
710 | } else { | |
711 | frold = NULL; | |
712 | } | |
713 | if (inew < new_view->nr) { | |
714 | frnew = &new_view->ranges[inew]; | |
715 | } else { | |
716 | frnew = NULL; | |
717 | } | |
718 | ||
719 | if (frold | |
720 | && (!frnew | |
721 | || int128_lt(frold->addr.start, frnew->addr.start) | |
722 | || (int128_eq(frold->addr.start, frnew->addr.start) | |
723 | && !flatrange_equal(frold, frnew)))) { | |
724 | /* In old but not in new, or in both but attributes changed. */ | |
725 | ||
726 | if (!adding) { | |
727 | MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); | |
728 | } | |
729 | ||
730 | ++iold; | |
731 | } else if (frold && frnew && flatrange_equal(frold, frnew)) { | |
732 | /* In both and unchanged (except logging may have changed) */ | |
733 | ||
734 | if (adding) { | |
735 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); | |
736 | if (frold->dirty_log_mask && !frnew->dirty_log_mask) { | |
737 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop); | |
738 | } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) { | |
739 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start); | |
740 | } | |
741 | } | |
742 | ||
743 | ++iold; | |
744 | ++inew; | |
745 | } else { | |
746 | /* In new */ | |
747 | ||
748 | if (adding) { | |
749 | MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); | |
750 | } | |
751 | ||
752 | ++inew; | |
753 | } | |
754 | } | |
755 | } | |
756 | ||
757 | ||
758 | static void address_space_update_topology(AddressSpace *as) | |
759 | { | |
760 | FlatView *old_view = address_space_get_flatview(as); | |
761 | FlatView *new_view = generate_memory_topology(as->root); | |
762 | ||
763 | address_space_update_topology_pass(as, old_view, new_view, false); | |
764 | address_space_update_topology_pass(as, old_view, new_view, true); | |
765 | ||
766 | qemu_mutex_lock(&flat_view_mutex); | |
767 | flatview_unref(as->current_map); | |
768 | as->current_map = new_view; | |
769 | qemu_mutex_unlock(&flat_view_mutex); | |
770 | ||
771 | /* Note that all the old MemoryRegions are still alive up to this | |
772 | * point. This relieves most MemoryListeners from the need to | |
773 | * ref/unref the MemoryRegions they get---unless they use them | |
774 | * outside the iothread mutex, in which case precise reference | |
775 | * counting is necessary. | |
776 | */ | |
777 | flatview_unref(old_view); | |
778 | ||
779 | address_space_update_ioeventfds(as); | |
780 | } | |
781 | ||
782 | void memory_region_transaction_begin(void) | |
783 | { | |
784 | qemu_flush_coalesced_mmio_buffer(); | |
785 | ++memory_region_transaction_depth; | |
786 | } | |
787 | ||
788 | void memory_region_transaction_commit(void) | |
789 | { | |
790 | AddressSpace *as; | |
791 | ||
792 | assert(memory_region_transaction_depth); | |
793 | --memory_region_transaction_depth; | |
794 | if (!memory_region_transaction_depth && memory_region_update_pending) { | |
795 | memory_region_update_pending = false; | |
796 | MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); | |
797 | ||
798 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
799 | address_space_update_topology(as); | |
800 | } | |
801 | ||
802 | MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); | |
803 | } | |
804 | } | |
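As a usage sketch (not part of this file), a device model can batch several layout changes so that listeners and the flat views are rebuilt only once, at the outermost commit; pci_space and mmio_bar are hypothetical regions owned by the caller:

```c
static void example_remap_bar(MemoryRegion *pci_space, MemoryRegion *mmio_bar,
                              hwaddr new_base)
{
    memory_region_transaction_begin();
    memory_region_del_subregion(pci_space, mmio_bar);
    memory_region_add_subregion(pci_space, new_base, mmio_bar);
    memory_region_set_enabled(mmio_bar, true);
    memory_region_transaction_commit();    /* single topology update */
}
```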
805 | ||
806 | static void memory_region_destructor_none(MemoryRegion *mr) | |
807 | { | |
808 | } | |
809 | ||
810 | static void memory_region_destructor_ram(MemoryRegion *mr) | |
811 | { | |
812 | qemu_ram_free(mr->ram_addr); | |
813 | } | |
814 | ||
815 | static void memory_region_destructor_alias(MemoryRegion *mr) | |
816 | { | |
817 | memory_region_unref(mr->alias); | |
818 | } | |
819 | ||
820 | static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr) | |
821 | { | |
822 | qemu_ram_free_from_ptr(mr->ram_addr); | |
823 | } | |
824 | ||
825 | static void memory_region_destructor_rom_device(MemoryRegion *mr) | |
826 | { | |
827 | qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK); | |
828 | } | |
829 | ||
830 | void memory_region_init(MemoryRegion *mr, | |
831 | Object *owner, | |
832 | const char *name, | |
833 | uint64_t size) | |
834 | { | |
835 | mr->ops = &unassigned_mem_ops; | |
836 | mr->opaque = NULL; | |
837 | mr->owner = owner; | |
838 | mr->iommu_ops = NULL; | |
839 | mr->parent = NULL; | |
840 | mr->size = int128_make64(size); | |
841 | if (size == UINT64_MAX) { | |
842 | mr->size = int128_2_64(); | |
843 | } | |
844 | mr->addr = 0; | |
845 | mr->subpage = false; | |
846 | mr->enabled = true; | |
847 | mr->terminates = false; | |
848 | mr->ram = false; | |
849 | mr->romd_mode = true; | |
850 | mr->readonly = false; | |
851 | mr->rom_device = false; | |
852 | mr->destructor = memory_region_destructor_none; | |
853 | mr->priority = 0; | |
854 | mr->may_overlap = false; | |
855 | mr->alias = NULL; | |
856 | QTAILQ_INIT(&mr->subregions); | |
857 | memset(&mr->subregions_link, 0, sizeof mr->subregions_link); | |
858 | QTAILQ_INIT(&mr->coalesced); | |
859 | mr->name = g_strdup(name); | |
860 | mr->dirty_log_mask = 0; | |
861 | mr->ioeventfd_nb = 0; | |
862 | mr->ioeventfds = NULL; | |
863 | mr->flush_coalesced_mmio = false; | |
864 | } | |
865 | ||
866 | static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, | |
867 | unsigned size) | |
868 | { | |
869 | #ifdef DEBUG_UNASSIGNED | |
870 | printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); | |
871 | #endif | |
872 | if (current_cpu != NULL) { | |
873 | cpu_unassigned_access(current_cpu, addr, false, false, 0, size); | |
874 | } | |
875 | return 0; | |
876 | } | |
877 | ||
878 | static void unassigned_mem_write(void *opaque, hwaddr addr, | |
879 | uint64_t val, unsigned size) | |
880 | { | |
881 | #ifdef DEBUG_UNASSIGNED | |
882 | printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); | |
883 | #endif | |
884 | if (current_cpu != NULL) { | |
885 | cpu_unassigned_access(current_cpu, addr, true, false, 0, size); | |
886 | } | |
887 | } | |
888 | ||
889 | static bool unassigned_mem_accepts(void *opaque, hwaddr addr, | |
890 | unsigned size, bool is_write) | |
891 | { | |
892 | return false; | |
893 | } | |
894 | ||
895 | const MemoryRegionOps unassigned_mem_ops = { | |
896 | .valid.accepts = unassigned_mem_accepts, | |
897 | .endianness = DEVICE_NATIVE_ENDIAN, | |
898 | }; | |
899 | ||
900 | bool memory_region_access_valid(MemoryRegion *mr, | |
901 | hwaddr addr, | |
902 | unsigned size, | |
903 | bool is_write) | |
904 | { | |
905 | int access_size_min, access_size_max; | |
906 | int access_size, i; | |
907 | ||
908 | if (!mr->ops->valid.unaligned && (addr & (size - 1))) { | |
909 | return false; | |
910 | } | |
911 | ||
912 | if (!mr->ops->valid.accepts) { | |
913 | return true; | |
914 | } | |
915 | ||
916 | access_size_min = mr->ops->valid.min_access_size; | |
917 | if (!mr->ops->valid.min_access_size) { | |
918 | access_size_min = 1; | |
919 | } | |
920 | ||
921 | access_size_max = mr->ops->valid.max_access_size; | |
922 | if (!mr->ops->valid.max_access_size) { | |
923 | access_size_max = 4; | |
924 | } | |
925 | ||
926 | access_size = MAX(MIN(size, access_size_max), access_size_min); | |
927 | for (i = 0; i < size; i += access_size) { | |
928 | if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size, | |
929 | is_write)) { | |
930 | return false; | |
931 | } | |
932 | } | |
933 | ||
934 | return true; | |
935 | } | |
936 | ||
937 | static uint64_t memory_region_dispatch_read1(MemoryRegion *mr, | |
938 | hwaddr addr, | |
939 | unsigned size) | |
940 | { | |
941 | uint64_t data = 0; | |
942 | ||
943 | if (mr->ops->read) { | |
944 | access_with_adjusted_size(addr, &data, size, | |
945 | mr->ops->impl.min_access_size, | |
946 | mr->ops->impl.max_access_size, | |
947 | memory_region_read_accessor, mr); | |
948 | } else { | |
949 | access_with_adjusted_size(addr, &data, size, 1, 4, | |
950 | memory_region_oldmmio_read_accessor, mr); | |
951 | } | |
952 | ||
953 | return data; | |
954 | } | |
955 | ||
956 | static bool memory_region_dispatch_read(MemoryRegion *mr, | |
957 | hwaddr addr, | |
958 | uint64_t *pval, | |
959 | unsigned size) | |
960 | { | |
961 | if (!memory_region_access_valid(mr, addr, size, false)) { | |
962 | *pval = unassigned_mem_read(mr, addr, size); | |
963 | return true; | |
964 | } | |
965 | ||
966 | *pval = memory_region_dispatch_read1(mr, addr, size); | |
967 | adjust_endianness(mr, pval, size); | |
968 | return false; | |
969 | } | |
970 | ||
971 | static bool memory_region_dispatch_write(MemoryRegion *mr, | |
972 | hwaddr addr, | |
973 | uint64_t data, | |
974 | unsigned size) | |
975 | { | |
976 | if (!memory_region_access_valid(mr, addr, size, true)) { | |
977 | unassigned_mem_write(mr, addr, data, size); | |
978 | return true; | |
979 | } | |
980 | ||
981 | adjust_endianness(mr, &data, size); | |
982 | ||
983 | if (mr->ops->write) { | |
984 | access_with_adjusted_size(addr, &data, size, | |
985 | mr->ops->impl.min_access_size, | |
986 | mr->ops->impl.max_access_size, | |
987 | memory_region_write_accessor, mr); | |
988 | } else { | |
989 | access_with_adjusted_size(addr, &data, size, 1, 4, | |
990 | memory_region_oldmmio_write_accessor, mr); | |
991 | } | |
992 | return false; | |
993 | } | |
994 | ||
995 | void memory_region_init_io(MemoryRegion *mr, | |
996 | Object *owner, | |
997 | const MemoryRegionOps *ops, | |
998 | void *opaque, | |
999 | const char *name, | |
1000 | uint64_t size) | |
1001 | { | |
1002 | memory_region_init(mr, owner, name, size); | |
1003 | mr->ops = ops; | |
1004 | mr->opaque = opaque; | |
1005 | mr->terminates = true; | |
1006 | mr->ram_addr = ~(ram_addr_t)0; | |
1007 | } | |
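A minimal sketch of how a device might use this, assuming a hypothetical ExampleDev with one 32-bit status register; the struct, names, and sizes are invented for illustration, and the owner is left NULL for brevity:

```c
typedef struct ExampleDev {
    uint32_t status;
} ExampleDev;

static uint64_t example_read(void *opaque, hwaddr addr, unsigned size)
{
    ExampleDev *d = opaque;
    return d->status;                 /* same value for any offset, for brevity */
}

static void example_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
{
    ExampleDev *d = opaque;
    d->status = val;
}

static const MemoryRegionOps example_ops = {
    .read = example_read,
    .write = example_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl.min_access_size = 4,        /* callbacks always see 4-byte accesses */
    .impl.max_access_size = 4,
};

static MemoryRegion example_mr;

static void example_mmio_realize(ExampleDev *d, MemoryRegion *parent, hwaddr base)
{
    memory_region_init_io(&example_mr, NULL, &example_ops, d,
                          "example-status", 0x10);
    memory_region_add_subregion(parent, base, &example_mr);
}
```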
1008 | ||
1009 | void memory_region_init_ram(MemoryRegion *mr, | |
1010 | Object *owner, | |
1011 | const char *name, | |
1012 | uint64_t size) | |
1013 | { | |
1014 | memory_region_init(mr, owner, name, size); | |
1015 | mr->ram = true; | |
1016 | mr->terminates = true; | |
1017 | mr->destructor = memory_region_destructor_ram; | |
1018 | mr->ram_addr = qemu_ram_alloc(size, mr); | |
1019 | } | |
1020 | ||
1021 | void memory_region_init_ram_ptr(MemoryRegion *mr, | |
1022 | Object *owner, | |
1023 | const char *name, | |
1024 | uint64_t size, | |
1025 | void *ptr) | |
1026 | { | |
1027 | memory_region_init(mr, owner, name, size); | |
1028 | mr->ram = true; | |
1029 | mr->terminates = true; | |
1030 | mr->destructor = memory_region_destructor_ram_from_ptr; | |
1031 | mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr); | |
1032 | } | |
1033 | ||
1034 | void memory_region_init_alias(MemoryRegion *mr, | |
1035 | Object *owner, | |
1036 | const char *name, | |
1037 | MemoryRegion *orig, | |
1038 | hwaddr offset, | |
1039 | uint64_t size) | |
1040 | { | |
1041 | memory_region_init(mr, owner, name, size); | |
1042 | memory_region_ref(orig); | |
1043 | mr->destructor = memory_region_destructor_alias; | |
1044 | mr->alias = orig; | |
1045 | mr->alias_offset = offset; | |
1046 | } | |
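For illustration, a board model might combine memory_region_init_ram() and memory_region_init_alias() like this; the 128 MiB size, the window address, and the region names are made up:

```c
static MemoryRegion ram, ram_window;

static void example_board_memory(MemoryRegion *system_memory)
{
    /* 128 MiB of RAM at guest physical address 0 */
    memory_region_init_ram(&ram, NULL, "ram", 128 * 1024 * 1024);
    memory_region_add_subregion(system_memory, 0, &ram);

    /* re-expose the top 1 MiB of that RAM at 0xe0000000 */
    memory_region_init_alias(&ram_window, NULL, "ram-window", &ram,
                             127 * 1024 * 1024, 1024 * 1024);
    memory_region_add_subregion(system_memory, 0xe0000000, &ram_window);
}
```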
1047 | ||
1048 | void memory_region_init_rom_device(MemoryRegion *mr, | |
1049 | Object *owner, | |
1050 | const MemoryRegionOps *ops, | |
1051 | void *opaque, | |
1052 | const char *name, | |
1053 | uint64_t size) | |
1054 | { | |
1055 | memory_region_init(mr, owner, name, size); | |
1056 | mr->ops = ops; | |
1057 | mr->opaque = opaque; | |
1058 | mr->terminates = true; | |
1059 | mr->rom_device = true; | |
1060 | mr->destructor = memory_region_destructor_rom_device; | |
1061 | mr->ram_addr = qemu_ram_alloc(size, mr); | |
1062 | } | |
1063 | ||
1064 | void memory_region_init_iommu(MemoryRegion *mr, | |
1065 | Object *owner, | |
1066 | const MemoryRegionIOMMUOps *ops, | |
1067 | const char *name, | |
1068 | uint64_t size) | |
1069 | { | |
1070 | memory_region_init(mr, owner, name, size); | |
1071 | mr->iommu_ops = ops; | |
1072 | mr->terminates = true; /* then re-forwards */ | |
1073 | notifier_list_init(&mr->iommu_notify); | |
1074 | } | |
1075 | ||
1076 | void memory_region_init_reservation(MemoryRegion *mr, | |
1077 | Object *owner, | |
1078 | const char *name, | |
1079 | uint64_t size) | |
1080 | { | |
1081 | memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size); | |
1082 | } | |
1083 | ||
1084 | void memory_region_destroy(MemoryRegion *mr) | |
1085 | { | |
1086 | assert(QTAILQ_EMPTY(&mr->subregions)); | |
1087 | assert(memory_region_transaction_depth == 0); | |
1088 | mr->destructor(mr); | |
1089 | memory_region_clear_coalescing(mr); | |
1090 | g_free((char *)mr->name); | |
1091 | g_free(mr->ioeventfds); | |
1092 | } | |
1093 | ||
1094 | Object *memory_region_owner(MemoryRegion *mr) | |
1095 | { | |
1096 | return mr->owner; | |
1097 | } | |
1098 | ||
1099 | void memory_region_ref(MemoryRegion *mr) | |
1100 | { | |
1101 | if (mr && mr->owner) { | |
1102 | object_ref(mr->owner); | |
1103 | } | |
1104 | } | |
1105 | ||
1106 | void memory_region_unref(MemoryRegion *mr) | |
1107 | { | |
1108 | if (mr && mr->owner) { | |
1109 | object_unref(mr->owner); | |
1110 | } | |
1111 | } | |
1112 | ||
1113 | uint64_t memory_region_size(MemoryRegion *mr) | |
1114 | { | |
1115 | if (int128_eq(mr->size, int128_2_64())) { | |
1116 | return UINT64_MAX; | |
1117 | } | |
1118 | return int128_get64(mr->size); | |
1119 | } | |
1120 | ||
1121 | const char *memory_region_name(MemoryRegion *mr) | |
1122 | { | |
1123 | return mr->name; | |
1124 | } | |
1125 | ||
1126 | bool memory_region_is_ram(MemoryRegion *mr) | |
1127 | { | |
1128 | return mr->ram; | |
1129 | } | |
1130 | ||
1131 | bool memory_region_is_logging(MemoryRegion *mr) | |
1132 | { | |
1133 | return mr->dirty_log_mask; | |
1134 | } | |
1135 | ||
1136 | bool memory_region_is_rom(MemoryRegion *mr) | |
1137 | { | |
1138 | return mr->ram && mr->readonly; | |
1139 | } | |
1140 | ||
1141 | bool memory_region_is_iommu(MemoryRegion *mr) | |
1142 | { | |
1143 | return mr->iommu_ops; | |
1144 | } | |
1145 | ||
1146 | void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n) | |
1147 | { | |
1148 | notifier_list_add(&mr->iommu_notify, n); | |
1149 | } | |
1150 | ||
1151 | void memory_region_unregister_iommu_notifier(Notifier *n) | |
1152 | { | |
1153 | notifier_remove(n); | |
1154 | } | |
1155 | ||
1156 | void memory_region_notify_iommu(MemoryRegion *mr, | |
1157 | IOMMUTLBEntry entry) | |
1158 | { | |
1159 | assert(memory_region_is_iommu(mr)); | |
1160 | notifier_list_notify(&mr->iommu_notify, &entry); | |
1161 | } | |
1162 | ||
1163 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) | |
1164 | { | |
1165 | uint8_t mask = 1 << client; | |
1166 | ||
1167 | memory_region_transaction_begin(); | |
1168 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); | |
1169 | memory_region_update_pending |= mr->enabled; | |
1170 | memory_region_transaction_commit(); | |
1171 | } | |
1172 | ||
1173 | bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr, | |
1174 | hwaddr size, unsigned client) | |
1175 | { | |
1176 | assert(mr->terminates); | |
1177 | return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, | |
1178 | 1 << client); | |
1179 | } | |
1180 | ||
1181 | void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, | |
1182 | hwaddr size) | |
1183 | { | |
1184 | assert(mr->terminates); | |
1185 | cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size, -1); | |
1186 | } | |
1187 | ||
1188 | bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr, | |
1189 | hwaddr size, unsigned client) | |
1190 | { | |
1191 | bool ret; | |
1192 | assert(mr->terminates); | |
1193 | ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, | |
1194 | 1 << client); | |
1195 | if (ret) { | |
1196 | cpu_physical_memory_reset_dirty(mr->ram_addr + addr, | |
1197 | mr->ram_addr + addr + size, | |
1198 | 1 << client); | |
1199 | } | |
1200 | return ret; | |
1201 | } | |
1202 | ||
1203 | ||
1204 | void memory_region_sync_dirty_bitmap(MemoryRegion *mr) | |
1205 | { | |
1206 | AddressSpace *as; | |
1207 | FlatRange *fr; | |
1208 | ||
1209 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
1210 | FlatView *view = address_space_get_flatview(as); | |
1211 | FOR_EACH_FLAT_RANGE(fr, view) { | |
1212 | if (fr->mr == mr) { | |
1213 | MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync); | |
1214 | } | |
1215 | } | |
1216 | flatview_unref(view); | |
1217 | } | |
1218 | } | |
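A sketch of how a display device might drive the dirty-tracking calls above; DIRTY_MEMORY_VGA and TARGET_PAGE_SIZE come from the usual QEMU headers, and vram is a hypothetical RAM region for which memory_region_set_log(vram, true, DIRTY_MEMORY_VGA) was called once at init time:

```c
static void example_update_display(MemoryRegion *vram, hwaddr vram_size)
{
    hwaddr page;

    /* pull dirty info from listeners (e.g. KVM) into the bitmap */
    memory_region_sync_dirty_bitmap(vram);

    for (page = 0; page < vram_size; page += TARGET_PAGE_SIZE) {
        if (memory_region_test_and_clear_dirty(vram, page, TARGET_PAGE_SIZE,
                                               DIRTY_MEMORY_VGA)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
}
```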
1219 | ||
1220 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly) | |
1221 | { | |
1222 | if (mr->readonly != readonly) { | |
1223 | memory_region_transaction_begin(); | |
1224 | mr->readonly = readonly; | |
1225 | memory_region_update_pending |= mr->enabled; | |
1226 | memory_region_transaction_commit(); | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) | |
1231 | { | |
1232 | if (mr->romd_mode != romd_mode) { | |
1233 | memory_region_transaction_begin(); | |
1234 | mr->romd_mode = romd_mode; | |
1235 | memory_region_update_pending |= mr->enabled; | |
1236 | memory_region_transaction_commit(); | |
1237 | } | |
1238 | } | |
1239 | ||
1240 | void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, | |
1241 | hwaddr size, unsigned client) | |
1242 | { | |
1243 | assert(mr->terminates); | |
1244 | cpu_physical_memory_reset_dirty(mr->ram_addr + addr, | |
1245 | mr->ram_addr + addr + size, | |
1246 | 1 << client); | |
1247 | } | |
1248 | ||
1249 | void *memory_region_get_ram_ptr(MemoryRegion *mr) | |
1250 | { | |
1251 | if (mr->alias) { | |
1252 | return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; | |
1253 | } | |
1254 | ||
1255 | assert(mr->terminates); | |
1256 | ||
1257 | return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK); | |
1258 | } | |
1259 | ||
1260 | static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) | |
1261 | { | |
1262 | FlatView *view; | |
1263 | FlatRange *fr; | |
1264 | CoalescedMemoryRange *cmr; | |
1265 | AddrRange tmp; | |
1266 | MemoryRegionSection section; | |
1267 | ||
1268 | view = address_space_get_flatview(as); | |
1269 | FOR_EACH_FLAT_RANGE(fr, view) { | |
1270 | if (fr->mr == mr) { | |
1271 | section = (MemoryRegionSection) { | |
1272 | .address_space = as, | |
1273 | .offset_within_address_space = int128_get64(fr->addr.start), | |
1274 | .size = fr->addr.size, | |
1275 | }; | |
1276 | ||
1277 | MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, §ion, | |
1278 | int128_get64(fr->addr.start), | |
1279 | int128_get64(fr->addr.size)); | |
1280 | QTAILQ_FOREACH(cmr, &mr->coalesced, link) { | |
1281 | tmp = addrrange_shift(cmr->addr, | |
1282 | int128_sub(fr->addr.start, | |
1283 | int128_make64(fr->offset_in_region))); | |
1284 | if (!addrrange_intersects(tmp, fr->addr)) { | |
1285 | continue; | |
1286 | } | |
1287 | tmp = addrrange_intersection(tmp, fr->addr); | |
1288 | MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, §ion, | |
1289 | int128_get64(tmp.start), | |
1290 | int128_get64(tmp.size)); | |
1291 | } | |
1292 | } | |
1293 | } | |
1294 | flatview_unref(view); | |
1295 | } | |
1296 | ||
1297 | static void memory_region_update_coalesced_range(MemoryRegion *mr) | |
1298 | { | |
1299 | AddressSpace *as; | |
1300 | ||
1301 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
1302 | memory_region_update_coalesced_range_as(mr, as); | |
1303 | } | |
1304 | } | |
1305 | ||
1306 | void memory_region_set_coalescing(MemoryRegion *mr) | |
1307 | { | |
1308 | memory_region_clear_coalescing(mr); | |
1309 | memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); | |
1310 | } | |
1311 | ||
1312 | void memory_region_add_coalescing(MemoryRegion *mr, | |
1313 | hwaddr offset, | |
1314 | uint64_t size) | |
1315 | { | |
1316 | CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); | |
1317 | ||
1318 | cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); | |
1319 | QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); | |
1320 | memory_region_update_coalesced_range(mr); | |
1321 | memory_region_set_flush_coalesced(mr); | |
1322 | } | |
1323 | ||
1324 | void memory_region_clear_coalescing(MemoryRegion *mr) | |
1325 | { | |
1326 | CoalescedMemoryRange *cmr; | |
1327 | ||
1328 | qemu_flush_coalesced_mmio_buffer(); | |
1329 | mr->flush_coalesced_mmio = false; | |
1330 | ||
1331 | while (!QTAILQ_EMPTY(&mr->coalesced)) { | |
1332 | cmr = QTAILQ_FIRST(&mr->coalesced); | |
1333 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); | |
1334 | g_free(cmr); | |
1335 | } | |
1336 | memory_region_update_coalesced_range(mr); | |
1337 | } | |
1338 | ||
1339 | void memory_region_set_flush_coalesced(MemoryRegion *mr) | |
1340 | { | |
1341 | mr->flush_coalesced_mmio = true; | |
1342 | } | |
1343 | ||
1344 | void memory_region_clear_flush_coalesced(MemoryRegion *mr) | |
1345 | { | |
1346 | qemu_flush_coalesced_mmio_buffer(); | |
1347 | if (QTAILQ_EMPTY(&mr->coalesced)) { | |
1348 | mr->flush_coalesced_mmio = false; | |
1349 | } | |
1350 | } | |
1351 | ||
1352 | void memory_region_add_eventfd(MemoryRegion *mr, | |
1353 | hwaddr addr, | |
1354 | unsigned size, | |
1355 | bool match_data, | |
1356 | uint64_t data, | |
1357 | EventNotifier *e) | |
1358 | { | |
1359 | MemoryRegionIoeventfd mrfd = { | |
1360 | .addr.start = int128_make64(addr), | |
1361 | .addr.size = int128_make64(size), | |
1362 | .match_data = match_data, | |
1363 | .data = data, | |
1364 | .e = e, | |
1365 | }; | |
1366 | unsigned i; | |
1367 | ||
1368 | adjust_endianness(mr, &mrfd.data, size); | |
1369 | memory_region_transaction_begin(); | |
1370 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
1371 | if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { | |
1372 | break; | |
1373 | } | |
1374 | } | |
1375 | ++mr->ioeventfd_nb; | |
1376 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
1377 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); | |
1378 | memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], | |
1379 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); | |
1380 | mr->ioeventfds[i] = mrfd; | |
1381 | memory_region_update_pending |= mr->enabled; | |
1382 | memory_region_transaction_commit(); | |
1383 | } | |
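A usage sketch, assuming event_notifier_init() from qemu/event_notifier.h and a hypothetical MMIO region with a doorbell register at offset 0x40: writes of the value 1 are then signalled through the eventfd (e.g. to a KVM or vhost fast path) instead of trapping into the device model.

```c
static EventNotifier doorbell;

static void example_wire_doorbell(MemoryRegion *mmio)
{
    event_notifier_init(&doorbell, 0);
    memory_region_add_eventfd(mmio, 0x40, 4, true, 1, &doorbell);
}
```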
1384 | ||
1385 | void memory_region_del_eventfd(MemoryRegion *mr, | |
1386 | hwaddr addr, | |
1387 | unsigned size, | |
1388 | bool match_data, | |
1389 | uint64_t data, | |
1390 | EventNotifier *e) | |
1391 | { | |
1392 | MemoryRegionIoeventfd mrfd = { | |
1393 | .addr.start = int128_make64(addr), | |
1394 | .addr.size = int128_make64(size), | |
1395 | .match_data = match_data, | |
1396 | .data = data, | |
1397 | .e = e, | |
1398 | }; | |
1399 | unsigned i; | |
1400 | ||
1401 | adjust_endianness(mr, &mrfd.data, size); | |
1402 | memory_region_transaction_begin(); | |
1403 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
1404 | if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) { | |
1405 | break; | |
1406 | } | |
1407 | } | |
1408 | assert(i != mr->ioeventfd_nb); | |
1409 | memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], | |
1410 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); | |
1411 | --mr->ioeventfd_nb; | |
1412 | mr->ioeventfds = g_realloc(mr->ioeventfds, | |
1413 | sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); | |
1414 | memory_region_update_pending |= mr->enabled; | |
1415 | memory_region_transaction_commit(); | |
1416 | } | |
1417 | ||
1418 | static void memory_region_add_subregion_common(MemoryRegion *mr, | |
1419 | hwaddr offset, | |
1420 | MemoryRegion *subregion) | |
1421 | { | |
1422 | MemoryRegion *other; | |
1423 | ||
1424 | memory_region_transaction_begin(); | |
1425 | ||
1426 | assert(!subregion->parent); | |
1427 | memory_region_ref(subregion); | |
1428 | subregion->parent = mr; | |
1429 | subregion->addr = offset; | |
1430 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
1431 | if (subregion->may_overlap || other->may_overlap) { | |
1432 | continue; | |
1433 | } | |
1434 | if (int128_ge(int128_make64(offset), | |
1435 | int128_add(int128_make64(other->addr), other->size)) | |
1436 | || int128_le(int128_add(int128_make64(offset), subregion->size), | |
1437 | int128_make64(other->addr))) { | |
1438 | continue; | |
1439 | } | |
1440 | #if 0 | |
1441 | printf("warning: subregion collision %llx/%llx (%s) " | |
1442 | "vs %llx/%llx (%s)\n", | |
1443 | (unsigned long long)offset, | |
1444 | (unsigned long long)int128_get64(subregion->size), | |
1445 | subregion->name, | |
1446 | (unsigned long long)other->addr, | |
1447 | (unsigned long long)int128_get64(other->size), | |
1448 | other->name); | |
1449 | #endif | |
1450 | } | |
1451 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
1452 | if (subregion->priority >= other->priority) { | |
1453 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); | |
1454 | goto done; | |
1455 | } | |
1456 | } | |
1457 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); | |
1458 | done: | |
1459 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
1460 | memory_region_transaction_commit(); | |
1461 | } | |
1462 | ||
1463 | ||
1464 | void memory_region_add_subregion(MemoryRegion *mr, | |
1465 | hwaddr offset, | |
1466 | MemoryRegion *subregion) | |
1467 | { | |
1468 | subregion->may_overlap = false; | |
1469 | subregion->priority = 0; | |
1470 | memory_region_add_subregion_common(mr, offset, subregion); | |
1471 | } | |
1472 | ||
1473 | void memory_region_add_subregion_overlap(MemoryRegion *mr, | |
1474 | hwaddr offset, | |
1475 | MemoryRegion *subregion, | |
1476 | unsigned priority) | |
1477 | { | |
1478 | subregion->may_overlap = true; | |
1479 | subregion->priority = priority; | |
1480 | memory_region_add_subregion_common(mr, offset, subregion); | |
1481 | } | |
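For example (names and addresses invented), a ROM can shadow the RAM underneath it by mapping both into the same container and giving the ROM a higher priority:

```c
static void example_overlay(MemoryRegion *system_memory,
                            MemoryRegion *ram, MemoryRegion *rom)
{
    memory_region_add_subregion(system_memory, 0, ram);            /* priority 0 */
    memory_region_add_subregion_overlap(system_memory, 0xfffc0000,
                                        rom, 1);                   /* wins where they overlap */
}
```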
1482 | ||
1483 | void memory_region_del_subregion(MemoryRegion *mr, | |
1484 | MemoryRegion *subregion) | |
1485 | { | |
1486 | memory_region_transaction_begin(); | |
1487 | assert(subregion->parent == mr); | |
1488 | subregion->parent = NULL; | |
1489 | QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); | |
1490 | memory_region_unref(subregion); | |
1491 | memory_region_update_pending |= mr->enabled && subregion->enabled; | |
1492 | memory_region_transaction_commit(); | |
1493 | } | |
1494 | ||
1495 | void memory_region_set_enabled(MemoryRegion *mr, bool enabled) | |
1496 | { | |
1497 | if (enabled == mr->enabled) { | |
1498 | return; | |
1499 | } | |
1500 | memory_region_transaction_begin(); | |
1501 | mr->enabled = enabled; | |
1502 | memory_region_update_pending = true; | |
1503 | memory_region_transaction_commit(); | |
1504 | } | |
1505 | ||
1506 | void memory_region_set_address(MemoryRegion *mr, hwaddr addr) | |
1507 | { | |
1508 | MemoryRegion *parent = mr->parent; | |
1509 | unsigned priority = mr->priority; | |
1510 | bool may_overlap = mr->may_overlap; | |
1511 | ||
1512 | if (addr == mr->addr || !parent) { | |
1513 | mr->addr = addr; | |
1514 | return; | |
1515 | } | |
1516 | ||
1517 | memory_region_transaction_begin(); | |
1518 | memory_region_ref(mr); | |
1519 | memory_region_del_subregion(parent, mr); | |
1520 | if (may_overlap) { | |
1521 | memory_region_add_subregion_overlap(parent, addr, mr, priority); | |
1522 | } else { | |
1523 | memory_region_add_subregion(parent, addr, mr); | |
1524 | } | |
1525 | memory_region_unref(mr); | |
1526 | memory_region_transaction_commit(); | |
1527 | } | |
1528 | ||
1529 | void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) | |
1530 | { | |
1531 | assert(mr->alias); | |
1532 | ||
1533 | if (offset == mr->alias_offset) { | |
1534 | return; | |
1535 | } | |
1536 | ||
1537 | memory_region_transaction_begin(); | |
1538 | mr->alias_offset = offset; | |
1539 | memory_region_update_pending |= mr->enabled; | |
1540 | memory_region_transaction_commit(); | |
1541 | } | |
1542 | ||
1543 | ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) | |
1544 | { | |
1545 | return mr->ram_addr; | |
1546 | } | |
1547 | ||
1548 | static int cmp_flatrange_addr(const void *addr_, const void *fr_) | |
1549 | { | |
1550 | const AddrRange *addr = addr_; | |
1551 | const FlatRange *fr = fr_; | |
1552 | ||
1553 | if (int128_le(addrrange_end(*addr), fr->addr.start)) { | |
1554 | return -1; | |
1555 | } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { | |
1556 | return 1; | |
1557 | } | |
1558 | return 0; | |
1559 | } | |
1560 | ||
1561 | static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) | |
1562 | { | |
1563 | return bsearch(&addr, view->ranges, view->nr, | |
1564 | sizeof(FlatRange), cmp_flatrange_addr); | |
1565 | } | |
1566 | ||
1567 | bool memory_region_present(MemoryRegion *parent, hwaddr addr) | |
1568 | { | |
1569 | MemoryRegion *mr = memory_region_find(parent, addr, 1).mr; | |
1570 | if (!mr) { | |
1571 | return false; | |
1572 | } | |
1573 | memory_region_unref(mr); | |
1574 | return true; | |
1575 | } | |
1576 | ||
1577 | MemoryRegionSection memory_region_find(MemoryRegion *mr, | |
1578 | hwaddr addr, uint64_t size) | |
1579 | { | |
1580 | MemoryRegionSection ret = { .mr = NULL }; | |
1581 | MemoryRegion *root; | |
1582 | AddressSpace *as; | |
1583 | AddrRange range; | |
1584 | FlatView *view; | |
1585 | FlatRange *fr; | |
1586 | ||
1587 | addr += mr->addr; | |
1588 | for (root = mr; root->parent; ) { | |
1589 | root = root->parent; | |
1590 | addr += root->addr; | |
1591 | } | |
1592 | ||
1593 | as = memory_region_to_address_space(root); | |
1594 | range = addrrange_make(int128_make64(addr), int128_make64(size)); | |
1595 | ||
1596 | view = address_space_get_flatview(as); | |
1597 | fr = flatview_lookup(view, range); | |
1598 | if (!fr) { | |
| flatview_unref(view); /* drop the reference taken by address_space_get_flatview() */ | |
1599 | return ret; | |
1600 | } | |
1601 | ||
1602 | while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { | |
1603 | --fr; | |
1604 | } | |
1605 | ||
1606 | ret.mr = fr->mr; | |
1607 | ret.address_space = as; | |
1608 | range = addrrange_intersection(range, fr->addr); | |
1609 | ret.offset_within_region = fr->offset_in_region; | |
1610 | ret.offset_within_region += int128_get64(int128_sub(range.start, | |
1611 | fr->addr.start)); | |
1612 | ret.size = range.size; | |
1613 | ret.offset_within_address_space = int128_get64(range.start); | |
1614 | ret.readonly = fr->readonly; | |
1615 | memory_region_ref(ret.mr); | |
1616 | ||
1617 | flatview_unref(view); | |
1618 | return ret; | |
1619 | } | |
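A small usage sketch: the returned section holds a reference on .mr (taken just above), so callers must drop it when they are done, as memory_region_present() does.

```c
static bool example_is_ram_at(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection sec = memory_region_find(root, addr, 1);
    bool is_ram = sec.mr && memory_region_is_ram(sec.mr);

    if (sec.mr) {
        memory_region_unref(sec.mr);
    }
    return is_ram;
}
```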
1620 | ||
1621 | void address_space_sync_dirty_bitmap(AddressSpace *as) | |
1622 | { | |
1623 | FlatView *view; | |
1624 | FlatRange *fr; | |
1625 | ||
1626 | view = address_space_get_flatview(as); | |
1627 | FOR_EACH_FLAT_RANGE(fr, view) { | |
1628 | MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync); | |
1629 | } | |
1630 | flatview_unref(view); | |
1631 | } | |
1632 | ||
1633 | void memory_global_dirty_log_start(void) | |
1634 | { | |
1635 | global_dirty_log = true; | |
1636 | MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); | |
1637 | } | |
1638 | ||
1639 | void memory_global_dirty_log_stop(void) | |
1640 | { | |
1641 | global_dirty_log = false; | |
1642 | MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); | |
1643 | } | |
1644 | ||
1645 | static void listener_add_address_space(MemoryListener *listener, | |
1646 | AddressSpace *as) | |
1647 | { | |
1648 | FlatView *view; | |
1649 | FlatRange *fr; | |
1650 | ||
1651 | if (listener->address_space_filter | |
1652 | && listener->address_space_filter != as) { | |
1653 | return; | |
1654 | } | |
1655 | ||
1656 | if (global_dirty_log) { | |
1657 | if (listener->log_global_start) { | |
1658 | listener->log_global_start(listener); | |
1659 | } | |
1660 | } | |
1661 | ||
1662 | view = address_space_get_flatview(as); | |
1663 | FOR_EACH_FLAT_RANGE(fr, view) { | |
1664 | MemoryRegionSection section = { | |
1665 | .mr = fr->mr, | |
1666 | .address_space = as, | |
1667 | .offset_within_region = fr->offset_in_region, | |
1668 | .size = fr->addr.size, | |
1669 | .offset_within_address_space = int128_get64(fr->addr.start), | |
1670 | .readonly = fr->readonly, | |
1671 | }; | |
1672 | if (listener->region_add) { | |
1673 | listener->region_add(listener, §ion); | |
1674 | } | |
1675 | } | |
1676 | flatview_unref(view); | |
1677 | } | |
1678 | ||
1679 | void memory_listener_register(MemoryListener *listener, AddressSpace *filter) | |
1680 | { | |
1681 | MemoryListener *other = NULL; | |
1682 | AddressSpace *as; | |
1683 | ||
1684 | listener->address_space_filter = filter; | |
1685 | if (QTAILQ_EMPTY(&memory_listeners) | |
1686 | || listener->priority >= QTAILQ_LAST(&memory_listeners, | |
1687 | memory_listeners)->priority) { | |
1688 | QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); | |
1689 | } else { | |
1690 | QTAILQ_FOREACH(other, &memory_listeners, link) { | |
1691 | if (listener->priority < other->priority) { | |
1692 | break; | |
1693 | } | |
1694 | } | |
1695 | QTAILQ_INSERT_BEFORE(other, listener, link); | |
1696 | } | |
1697 | ||
1698 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
1699 | listener_add_address_space(listener, as); | |
1700 | } | |
1701 | } | |
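A sketch of a listener that just logs new mappings for one address space; example_as is hypothetical and the usual stdio/inttypes headers are assumed. Note that MemoryRegionSection.size is an Int128 in this version of the API, hence int128_get64():

```c
static void example_region_add(MemoryListener *l, MemoryRegionSection *s)
{
    fprintf(stderr, "map %s @ 0x%" PRIx64 " size 0x%" PRIx64 "\n",
            memory_region_name(s->mr),
            (uint64_t)s->offset_within_address_space,
            int128_get64(s->size));
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

/* ... memory_listener_register(&example_listener, example_as); */
```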
1702 | ||
1703 | void memory_listener_unregister(MemoryListener *listener) | |
1704 | { | |
1705 | QTAILQ_REMOVE(&memory_listeners, listener, link); | |
1706 | } | |
1707 | ||
1708 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) | |
1709 | { | |
1710 | if (QTAILQ_EMPTY(&address_spaces)) { | |
1711 | memory_init(); | |
1712 | } | |
1713 | ||
1714 | memory_region_transaction_begin(); | |
1715 | as->root = root; | |
1716 | as->current_map = g_new(FlatView, 1); | |
1717 | flatview_init(as->current_map); | |
1718 | as->ioeventfd_nb = 0; | |
1719 | as->ioeventfds = NULL; | |
1720 | QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); | |
1721 | as->name = g_strdup(name ? name : "anonymous"); | |
1722 | address_space_init_dispatch(as); | |
1723 | memory_region_update_pending |= root->enabled; | |
1724 | memory_region_transaction_commit(); | |
1725 | } | |
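And a final sketch of how a board model wires the pieces together: a root container of maximal size and an address space rendered over it (names are illustrative):

```c
static MemoryRegion sysmem;
static AddressSpace example_as;

static void example_machine_init(void)
{
    memory_region_init(&sysmem, NULL, "system", UINT64_MAX);
    address_space_init(&example_as, &sysmem, "example-memory");
}
```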
1726 | ||
1727 | void address_space_destroy(AddressSpace *as) | |
1728 | { | |
1729 | /* Flush out anything from MemoryListeners listening in on this */ | |
1730 | memory_region_transaction_begin(); | |
1731 | as->root = NULL; | |
1732 | memory_region_transaction_commit(); | |
1733 | QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); | |
1734 | address_space_destroy_dispatch(as); | |
1735 | flatview_unref(as->current_map); | |
1736 | g_free(as->name); | |
1737 | g_free(as->ioeventfds); | |
1738 | } | |
1739 | ||
1740 | bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size) | |
1741 | { | |
1742 | return memory_region_dispatch_read(mr, addr, pval, size); | |
1743 | } | |
1744 | ||
1745 | bool io_mem_write(MemoryRegion *mr, hwaddr addr, | |
1746 | uint64_t val, unsigned size) | |
1747 | { | |
1748 | return memory_region_dispatch_write(mr, addr, val, size); | |
1749 | } | |
1750 | ||
1751 | typedef struct MemoryRegionList MemoryRegionList; | |
1752 | ||
1753 | struct MemoryRegionList { | |
1754 | const MemoryRegion *mr; | |
1755 | bool printed; | |
1756 | QTAILQ_ENTRY(MemoryRegionList) queue; | |
1757 | }; | |
1758 | ||
1759 | typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead; | |
1760 | ||
1761 | static void mtree_print_mr(fprintf_function mon_printf, void *f, | |
1762 | const MemoryRegion *mr, unsigned int level, | |
1763 | hwaddr base, | |
1764 | MemoryRegionListHead *alias_print_queue) | |
1765 | { | |
1766 | MemoryRegionList *new_ml, *ml, *next_ml; | |
1767 | MemoryRegionListHead submr_print_queue; | |
1768 | const MemoryRegion *submr; | |
1769 | unsigned int i; | |
1770 | ||
1771 | if (!mr || !mr->enabled) { | |
1772 | return; | |
1773 | } | |
1774 | ||
1775 | for (i = 0; i < level; i++) { | |
1776 | mon_printf(f, " "); | |
1777 | } | |
1778 | ||
1779 | if (mr->alias) { | |
1780 | MemoryRegionList *ml; | |
1781 | bool found = false; | |
1782 | ||
1783 | /* check if the alias is already in the queue */ | |
1784 | QTAILQ_FOREACH(ml, alias_print_queue, queue) { | |
1785 | if (ml->mr == mr->alias && !ml->printed) { | |
1786 | found = true; | |
1787 | } | |
1788 | } | |
1789 | ||
1790 | if (!found) { | |
1791 | ml = g_new(MemoryRegionList, 1); | |
1792 | ml->mr = mr->alias; | |
1793 | ml->printed = false; | |
1794 | QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue); | |
1795 | } | |
1796 | mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx | |
1797 | " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx | |
1798 | "-" TARGET_FMT_plx "\n", | |
1799 | base + mr->addr, | |
1800 | base + mr->addr | |
1801 | + (int128_nz(mr->size) ? | |
1802 | (hwaddr)int128_get64(int128_sub(mr->size, | |
1803 | int128_one())) : 0), | |
1804 | mr->priority, | |
1805 | mr->romd_mode ? 'R' : '-', | |
1806 | !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W' | |
1807 | : '-', | |
1808 | mr->name, | |
1809 | mr->alias->name, | |
1810 | mr->alias_offset, | |
1811 | mr->alias_offset | |
1812 | + (int128_nz(mr->size) ? | |
1813 | (hwaddr)int128_get64(int128_sub(mr->size, | |
1814 | int128_one())) : 0)); | |
1815 | } else { | |
1816 | mon_printf(f, | |
1817 | TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n", | |
1818 | base + mr->addr, | |
1819 | base + mr->addr | |
1820 | + (int128_nz(mr->size) ? | |
1821 | (hwaddr)int128_get64(int128_sub(mr->size, | |
1822 | int128_one())) : 0), | |
1823 | mr->priority, | |
1824 | mr->romd_mode ? 'R' : '-', | |
1825 | !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W' | |
1826 | : '-', | |
1827 | mr->name); | |
1828 | } | |
1829 | ||
1830 | QTAILQ_INIT(&submr_print_queue); | |
1831 | ||
1832 | QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { | |
1833 | new_ml = g_new(MemoryRegionList, 1); | |
1834 | new_ml->mr = submr; | |
1835 | QTAILQ_FOREACH(ml, &submr_print_queue, queue) { | |
1836 | if (new_ml->mr->addr < ml->mr->addr || | |
1837 | (new_ml->mr->addr == ml->mr->addr && | |
1838 | new_ml->mr->priority > ml->mr->priority)) { | |
1839 | QTAILQ_INSERT_BEFORE(ml, new_ml, queue); | |
1840 | new_ml = NULL; | |
1841 | break; | |
1842 | } | |
1843 | } | |
1844 | if (new_ml) { | |
1845 | QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue); | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | QTAILQ_FOREACH(ml, &submr_print_queue, queue) { | |
1850 | mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr, | |
1851 | alias_print_queue); | |
1852 | } | |
1853 | ||
1854 | QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) { | |
1855 | g_free(ml); | |
1856 | } | |
1857 | } | |
1858 | ||
1859 | void mtree_info(fprintf_function mon_printf, void *f) | |
1860 | { | |
1861 | MemoryRegionListHead ml_head; | |
1862 | MemoryRegionList *ml, *ml2; | |
1863 | AddressSpace *as; | |
1864 | ||
1865 | QTAILQ_INIT(&ml_head); | |
1866 | ||
1867 | QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { | |
1868 | mon_printf(f, "%s\n", as->name); | |
1869 | mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head); | |
1870 | } | |
1871 | ||
1872 | mon_printf(f, "aliases\n"); | |
1873 | /* print aliased regions */ | |
1874 | QTAILQ_FOREACH(ml, &ml_head, queue) { | |
1875 | if (!ml->printed) { | |
1876 | mon_printf(f, "%s\n", ml->mr->name); | |
1877 | mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head); | |
1878 | } | |
1879 | } | |
1880 | ||
1881 | QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) { | |
1882 | g_free(ml); | |
1883 | } | |
1884 | } |