Commit | Line | Data |
---|---|---|
093bc2cd AK |
1 | /* |
2 | * Physical memory management | |
3 | * | |
4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates | |
5 | * | |
6 | * Authors: | |
7 | * Avi Kivity <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
13 | ||
14 | #include "memory.h" | |
1c0ffa58 | 15 | #include "exec-memory.h" |
658b2224 | 16 | #include "ioport.h" |
74901c3b | 17 | #include "bitops.h" |
3e9d69e7 | 18 | #include "kvm.h" |
093bc2cd AK |
19 | #include <assert.h> |
20 | ||
4ef4db86 AK |
21 | unsigned memory_region_transaction_depth = 0; |
22 | ||
093bc2cd AK |
23 | typedef struct AddrRange AddrRange; |
24 | ||
25 | struct AddrRange { | |
26 | uint64_t start; | |
27 | uint64_t size; | |
28 | }; | |
29 | ||
30 | static AddrRange addrrange_make(uint64_t start, uint64_t size) | |
31 | { | |
32 | return (AddrRange) { start, size }; | |
33 | } | |
34 | ||
35 | static bool addrrange_equal(AddrRange r1, AddrRange r2) | |
36 | { | |
37 | return r1.start == r2.start && r1.size == r2.size; | |
38 | } | |
39 | ||
40 | static uint64_t addrrange_end(AddrRange r) | |
41 | { | |
42 | return r.start + r.size; | |
43 | } | |
44 | ||
45 | static AddrRange addrrange_shift(AddrRange range, int64_t delta) | |
46 | { | |
47 | range.start += delta; | |
48 | return range; | |
49 | } | |
50 | ||
51 | static bool addrrange_intersects(AddrRange r1, AddrRange r2) | |
52 | { | |
53 | return (r1.start >= r2.start && r1.start < r2.start + r2.size) | |
54 | || (r2.start >= r1.start && r2.start < r1.start + r1.size); | |
55 | } | |
56 | ||
57 | static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) | |
58 | { | |
59 | uint64_t start = MAX(r1.start, r2.start); | |
60 | /* off-by-one arithmetic to prevent overflow */ | |
61 | uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1); | |
62 | return addrrange_make(start, end - start + 1); | |
63 | } | |
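/* Worked example (added commentary, not in the original source): the
 * "end - 1" form keeps the arithmetic representable even when a range
 * runs up to the very top of the 64-bit address space.  With
 *
 *     r1 = { 0xFFFFFFFF00000000, 0x100000000 }   (ends exactly at 2^64)
 *     r2 = { 0xFFFFFFFF80000000, 0x40000000 }
 *
 * the naive end r1.start + r1.size wraps to 0, while end - 1 stays at
 * 0xFFFFFFFFFFFFFFFF, so addrrange_intersection(r1, r2) still computes
 * the correct result { 0xFFFFFFFF80000000, 0x40000000 }.
 */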
64 | ||
65 | struct CoalescedMemoryRange { | |
66 | AddrRange addr; | |
67 | QTAILQ_ENTRY(CoalescedMemoryRange) link; | |
68 | }; | |
69 | ||
3e9d69e7 AK |
70 | struct MemoryRegionIoeventfd { |
71 | AddrRange addr; | |
72 | bool match_data; | |
73 | uint64_t data; | |
74 | int fd; | |
75 | }; | |
76 | ||
77 | static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a, | |
78 | MemoryRegionIoeventfd b) | |
79 | { | |
80 | if (a.addr.start < b.addr.start) { | |
81 | return true; | |
82 | } else if (a.addr.start > b.addr.start) { | |
83 | return false; | |
84 | } else if (a.addr.size < b.addr.size) { | |
85 | return true; | |
86 | } else if (a.addr.size > b.addr.size) { | |
87 | return false; | |
88 | } else if (a.match_data < b.match_data) { | |
89 | return true; | |
90 | } else if (a.match_data > b.match_data) { | |
91 | return false; | |
92 | } else if (a.match_data) { | |
93 | if (a.data < b.data) { | |
94 | return true; | |
95 | } else if (a.data > b.data) { | |
96 | return false; | |
97 | } | |
98 | } | |
99 | if (a.fd < b.fd) { | |
100 | return true; | |
101 | } else if (a.fd > b.fd) { | |
102 | return false; | |
103 | } | |
104 | return false; | |
105 | } | |
106 | ||
107 | static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a, | |
108 | MemoryRegionIoeventfd b) | |
109 | { | |
110 | return !memory_region_ioeventfd_before(a, b) | |
111 | && !memory_region_ioeventfd_before(b, a); | |
112 | } | |
113 | ||
093bc2cd AK |
114 | typedef struct FlatRange FlatRange; |
115 | typedef struct FlatView FlatView; | |
116 | ||
117 | /* Range of memory in the global map. Addresses are absolute. */ | |
118 | struct FlatRange { | |
119 | MemoryRegion *mr; | |
120 | target_phys_addr_t offset_in_region; | |
121 | AddrRange addr; | |
5a583347 | 122 | uint8_t dirty_log_mask; |
093bc2cd AK |
123 | }; |
124 | ||
125 | /* Flattened global view of current active memory hierarchy. Kept in sorted | |
126 | * order. | |
127 | */ | |
128 | struct FlatView { | |
129 | FlatRange *ranges; | |
130 | unsigned nr; | |
131 | unsigned nr_allocated; | |
132 | }; | |
133 | ||
cc31e6e7 AK |
134 | typedef struct AddressSpace AddressSpace; |
135 | typedef struct AddressSpaceOps AddressSpaceOps; | |
136 | ||
137 | /* A system address space - I/O, memory, etc. */ | |
138 | struct AddressSpace { | |
139 | const AddressSpaceOps *ops; | |
140 | MemoryRegion *root; | |
141 | FlatView current_map; | |
3e9d69e7 AK |
142 | int ioeventfd_nb; |
143 | MemoryRegionIoeventfd *ioeventfds; | |
cc31e6e7 AK |
144 | }; |
145 | ||
146 | struct AddressSpaceOps { | |
147 | void (*range_add)(AddressSpace *as, FlatRange *fr); | |
148 | void (*range_del)(AddressSpace *as, FlatRange *fr); | |
149 | void (*log_start)(AddressSpace *as, FlatRange *fr); | |
150 | void (*log_stop)(AddressSpace *as, FlatRange *fr); | |
3e9d69e7 AK |
151 | void (*ioeventfd_add)(AddressSpace *as, MemoryRegionIoeventfd *fd); |
152 | void (*ioeventfd_del)(AddressSpace *as, MemoryRegionIoeventfd *fd); | |
cc31e6e7 AK |
153 | }; |
154 | ||
093bc2cd AK |
155 | #define FOR_EACH_FLAT_RANGE(var, view) \ |
156 | for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) | |
157 | ||
093bc2cd AK |
158 | static bool flatrange_equal(FlatRange *a, FlatRange *b) |
159 | { | |
160 | return a->mr == b->mr | |
161 | && addrrange_equal(a->addr, b->addr) | |
162 | && a->offset_in_region == b->offset_in_region; | |
163 | } | |
164 | ||
165 | static void flatview_init(FlatView *view) | |
166 | { | |
167 | view->ranges = NULL; | |
168 | view->nr = 0; | |
169 | view->nr_allocated = 0; | |
170 | } | |
171 | ||
172 | /* Insert a range into a given position. Caller is responsible for maintaining | |
173 | * sorting order. | |
174 | */ | |
175 | static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) | |
176 | { | |
177 | if (view->nr == view->nr_allocated) { | |
178 | view->nr_allocated = MAX(2 * view->nr, 10); | |
179 | view->ranges = qemu_realloc(view->ranges, | |
180 | view->nr_allocated * sizeof(*view->ranges)); | |
181 | } | |
182 | memmove(view->ranges + pos + 1, view->ranges + pos, | |
183 | (view->nr - pos) * sizeof(FlatRange)); | |
184 | view->ranges[pos] = *range; | |
185 | ++view->nr; | |
186 | } | |
187 | ||
188 | static void flatview_destroy(FlatView *view) | |
189 | { | |
190 | qemu_free(view->ranges); | |
191 | } | |
192 | ||
3d8e6bf9 AK |
193 | static bool can_merge(FlatRange *r1, FlatRange *r2) |
194 | { | |
195 | return addrrange_end(r1->addr) == r2->addr.start | |
196 | && r1->mr == r2->mr | |
197 | && r1->offset_in_region + r1->addr.size == r2->offset_in_region | |
198 | && r1->dirty_log_mask == r2->dirty_log_mask; | |
199 | } | |
200 | ||
201 | /* Attempt to simplify a view by merging adjacent ranges */ | |
202 | static void flatview_simplify(FlatView *view) | |
203 | { | |
204 | unsigned i, j; | |
205 | ||
206 | i = 0; | |
207 | while (i < view->nr) { | |
208 | j = i + 1; | |
209 | while (j < view->nr | |
210 | && can_merge(&view->ranges[j-1], &view->ranges[j])) { | |
211 | view->ranges[i].addr.size += view->ranges[j].addr.size; | |
212 | ++j; | |
213 | } | |
214 | ++i; | |
215 | memmove(&view->ranges[i], &view->ranges[j], | |
216 | (view->nr - j) * sizeof(view->ranges[j])); | |
217 | view->nr -= j - i; | |
218 | } | |
219 | } | |
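/* Worked example (added commentary): three consecutive ranges produced
 * from the same region and with the same dirty_log_mask,
 *
 *     { mr, offset_in_region 0x0000, addr 0x1000 + 0x1000 }
 *     { mr, offset_in_region 0x1000, addr 0x2000 + 0x1000 }
 *     { mr, offset_in_region 0x2000, addr 0x3000 + 0x1000 }
 *
 * satisfy can_merge() pairwise, so flatview_simplify() collapses them
 * into the single entry { mr, offset_in_region 0x0000, addr 0x1000 + 0x3000 }.
 */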
220 | ||
16ef61c9 AK |
221 | static void memory_region_prepare_ram_addr(MemoryRegion *mr); |
222 | ||
cc31e6e7 AK |
223 | static void as_memory_range_add(AddressSpace *as, FlatRange *fr) |
224 | { | |
225 | ram_addr_t phys_offset, region_offset; | |
226 | ||
16ef61c9 AK |
227 | memory_region_prepare_ram_addr(fr->mr); |
228 | ||
cc31e6e7 AK |
229 | phys_offset = fr->mr->ram_addr; |
230 | region_offset = fr->offset_in_region; | |
231 | /* cpu_register_physical_memory_log() wants region_offset for | |
232 | * mmio, but prefers offsetting phys_offset for RAM. Humour it. | |
233 | */ | |
234 | if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) { | |
235 | phys_offset += region_offset; | |
236 | region_offset = 0; | |
237 | } | |
238 | ||
239 | cpu_register_physical_memory_log(fr->addr.start, | |
240 | fr->addr.size, | |
241 | phys_offset, | |
242 | region_offset, | |
243 | fr->dirty_log_mask); | |
244 | } | |
245 | ||
246 | static void as_memory_range_del(AddressSpace *as, FlatRange *fr) | |
247 | { | |
248 | cpu_register_physical_memory(fr->addr.start, fr->addr.size, | |
249 | IO_MEM_UNASSIGNED); | |
250 | } | |
251 | ||
252 | static void as_memory_log_start(AddressSpace *as, FlatRange *fr) | |
253 | { | |
254 | cpu_physical_log_start(fr->addr.start, fr->addr.size); | |
255 | } | |
256 | ||
257 | static void as_memory_log_stop(AddressSpace *as, FlatRange *fr) | |
258 | { | |
259 | cpu_physical_log_stop(fr->addr.start, fr->addr.size); | |
260 | } | |
261 | ||
3e9d69e7 AK |
262 | static void as_memory_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd) |
263 | { | |
264 | int r; | |
265 | ||
266 | assert(fd->match_data && fd->addr.size == 4); | |
267 | ||
268 | r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, true); | |
269 | if (r < 0) { | |
270 | abort(); | |
271 | } | |
272 | } | |
273 | ||
274 | static void as_memory_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd) | |
275 | { | |
276 | int r; | |
277 | ||
278 | r = kvm_set_ioeventfd_mmio_long(fd->fd, fd->addr.start, fd->data, false); | |
279 | if (r < 0) { | |
280 | abort(); | |
281 | } | |
282 | } | |
283 | ||
cc31e6e7 AK |
284 | static const AddressSpaceOps address_space_ops_memory = { |
285 | .range_add = as_memory_range_add, | |
286 | .range_del = as_memory_range_del, | |
287 | .log_start = as_memory_log_start, | |
288 | .log_stop = as_memory_log_stop, | |
3e9d69e7 AK |
289 | .ioeventfd_add = as_memory_ioeventfd_add, |
290 | .ioeventfd_del = as_memory_ioeventfd_del, | |
cc31e6e7 AK |
291 | }; |
292 | ||
293 | static AddressSpace address_space_memory = { | |
294 | .ops = &address_space_ops_memory, | |
295 | }; | |
296 | ||
627a0e90 AK |
297 | static const MemoryRegionPortio *find_portio(MemoryRegion *mr, uint64_t offset, |
298 | unsigned width, bool write) | |
299 | { | |
300 | const MemoryRegionPortio *mrp; | |
301 | ||
302 | for (mrp = mr->ops->old_portio; mrp->size; ++mrp) { | |
303 | if (offset >= mrp->offset && offset < mrp->offset + mrp->len | |
304 | && width == mrp->size | |
305 | && (write ? (bool)mrp->write : (bool)mrp->read)) { | |
306 | return mrp; | |
307 | } | |
308 | } | |
309 | return NULL; | |
310 | } | |
311 | ||
658b2224 AK |
312 | static void memory_region_iorange_read(IORange *iorange, |
313 | uint64_t offset, | |
314 | unsigned width, | |
315 | uint64_t *data) | |
316 | { | |
317 | MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange); | |
318 | ||
627a0e90 AK |
319 | if (mr->ops->old_portio) { |
320 | const MemoryRegionPortio *mrp = find_portio(mr, offset, width, false); | |
321 | ||
322 | *data = ((uint64_t)1 << (width * 8)) - 1; | |
323 | if (mrp) { | |
324 | *data = mrp->read(mr->opaque, offset - mrp->offset); | |
325 | } | |
326 | return; | |
327 | } | |
658b2224 AK |
328 | *data = mr->ops->read(mr->opaque, offset, width); |
329 | } | |
330 | ||
331 | static void memory_region_iorange_write(IORange *iorange, | |
332 | uint64_t offset, | |
333 | unsigned width, | |
334 | uint64_t data) | |
335 | { | |
336 | MemoryRegion *mr = container_of(iorange, MemoryRegion, iorange); | |
337 | ||
627a0e90 AK |
338 | if (mr->ops->old_portio) { |
339 | const MemoryRegionPortio *mrp = find_portio(mr, offset, width, true); | |
340 | ||
341 | if (mrp) { | |
342 | mrp->write(mr->opaque, offset - mrp->offset, data); | |
343 | } | |
344 | return; | |
345 | } | |
658b2224 AK |
346 | mr->ops->write(mr->opaque, offset, data, width); |
347 | } | |
348 | ||
349 | static const IORangeOps memory_region_iorange_ops = { | |
350 | .read = memory_region_iorange_read, | |
351 | .write = memory_region_iorange_write, | |
352 | }; | |
353 | ||
354 | static void as_io_range_add(AddressSpace *as, FlatRange *fr) | |
355 | { | |
356 | iorange_init(&fr->mr->iorange, &memory_region_iorange_ops, | |
357 | fr->addr.start, fr->addr.size); | |
358 | ioport_register(&fr->mr->iorange); | |
359 | } | |
360 | ||
361 | static void as_io_range_del(AddressSpace *as, FlatRange *fr) | |
362 | { | |
363 | isa_unassign_ioport(fr->addr.start, fr->addr.size); | |
364 | } | |
365 | ||
3e9d69e7 AK |
366 | static void as_io_ioeventfd_add(AddressSpace *as, MemoryRegionIoeventfd *fd) |
367 | { | |
368 | int r; | |
369 | ||
370 | assert(fd->match_data && fd->addr.size == 2); | |
371 | ||
372 | r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, true); | |
373 | if (r < 0) { | |
374 | abort(); | |
375 | } | |
376 | } | |
377 | ||
378 | static void as_io_ioeventfd_del(AddressSpace *as, MemoryRegionIoeventfd *fd) | |
379 | { | |
380 | int r; | |
381 | ||
382 | r = kvm_set_ioeventfd_pio_word(fd->fd, fd->addr.start, fd->data, false); | |
383 | if (r < 0) { | |
384 | abort(); | |
385 | } | |
386 | } | |
387 | ||
658b2224 AK |
388 | static const AddressSpaceOps address_space_ops_io = { |
389 | .range_add = as_io_range_add, | |
390 | .range_del = as_io_range_del, | |
3e9d69e7 AK |
391 | .ioeventfd_add = as_io_ioeventfd_add, |
392 | .ioeventfd_del = as_io_ioeventfd_del, | |
658b2224 AK |
393 | }; |
394 | ||
395 | static AddressSpace address_space_io = { | |
396 | .ops = &address_space_ops_io, | |
397 | }; | |
398 | ||
093bc2cd AK |
399 | /* Render a memory region into the global view. Ranges in @view obscure |
400 | * ranges in @mr. | |
401 | */ | |
402 | static void render_memory_region(FlatView *view, | |
403 | MemoryRegion *mr, | |
404 | target_phys_addr_t base, | |
405 | AddrRange clip) | |
406 | { | |
407 | MemoryRegion *subregion; | |
408 | unsigned i; | |
409 | target_phys_addr_t offset_in_region; | |
410 | uint64_t remain; | |
411 | uint64_t now; | |
412 | FlatRange fr; | |
413 | AddrRange tmp; | |
414 | ||
415 | base += mr->addr; | |
416 | ||
417 | tmp = addrrange_make(base, mr->size); | |
418 | ||
419 | if (!addrrange_intersects(tmp, clip)) { | |
420 | return; | |
421 | } | |
422 | ||
423 | clip = addrrange_intersection(tmp, clip); | |
424 | ||
425 | if (mr->alias) { | |
426 | base -= mr->alias->addr; | |
427 | base -= mr->alias_offset; | |
428 | render_memory_region(view, mr->alias, base, clip); | |
429 | return; | |
430 | } | |
431 | ||
432 | /* Render subregions in priority order. */ | |
433 | QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { | |
434 | render_memory_region(view, subregion, base, clip); | |
435 | } | |
436 | ||
14a3c10a | 437 | if (!mr->terminates) { |
093bc2cd AK |
438 | return; |
439 | } | |
440 | ||
441 | offset_in_region = clip.start - base; | |
442 | base = clip.start; | |
443 | remain = clip.size; | |
444 | ||
445 | /* Render the region itself into any gaps left by the current view. */ | |
446 | for (i = 0; i < view->nr && remain; ++i) { | |
447 | if (base >= addrrange_end(view->ranges[i].addr)) { | |
448 | continue; | |
449 | } | |
450 | if (base < view->ranges[i].addr.start) { | |
451 | now = MIN(remain, view->ranges[i].addr.start - base); | |
452 | fr.mr = mr; | |
453 | fr.offset_in_region = offset_in_region; | |
454 | fr.addr = addrrange_make(base, now); | |
5a583347 | 455 | fr.dirty_log_mask = mr->dirty_log_mask; |
093bc2cd AK |
456 | flatview_insert(view, i, &fr); |
457 | ++i; | |
458 | base += now; | |
459 | offset_in_region += now; | |
460 | remain -= now; | |
461 | } | |
462 | if (base == view->ranges[i].addr.start) { | |
463 | now = MIN(remain, view->ranges[i].addr.size); | |
464 | base += now; | |
465 | offset_in_region += now; | |
466 | remain -= now; | |
467 | } | |
468 | } | |
469 | if (remain) { | |
470 | fr.mr = mr; | |
471 | fr.offset_in_region = offset_in_region; | |
472 | fr.addr = addrrange_make(base, remain); | |
5a583347 | 473 | fr.dirty_log_mask = mr->dirty_log_mask; |
093bc2cd AK |
474 | flatview_insert(view, i, &fr); |
475 | } | |
476 | } | |
477 | ||
478 | /* Render a memory topology into a list of disjoint absolute ranges. */ | |
479 | static FlatView generate_memory_topology(MemoryRegion *mr) | |
480 | { | |
481 | FlatView view; | |
482 | ||
483 | flatview_init(&view); | |
484 | ||
485 | render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX)); | |
3d8e6bf9 | 486 | flatview_simplify(&view); |
093bc2cd AK |
487 | |
488 | return view; | |
489 | } | |
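/* Illustrative flattening (added commentary, assuming a hypothetical
 * hierarchy): a root container with a 64KB RAM subregion at 0x0
 * (priority 0) and a 4KB MMIO subregion added at 0x8000 with
 * memory_region_add_subregion_overlap(..., 1) renders into three
 * disjoint FlatRanges:
 *
 *     0x0000-0x7FFF  ram,  offset_in_region 0x0000
 *     0x8000-0x8FFF  mmio, offset_in_region 0x0000
 *     0x9000-0xFFFF  ram,  offset_in_region 0x9000
 *
 * because subregions are rendered in descending priority order and the
 * ranges rendered earlier obscure whatever is rendered afterwards.
 */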
490 | ||
3e9d69e7 AK |
491 | static void address_space_add_del_ioeventfds(AddressSpace *as, |
492 | MemoryRegionIoeventfd *fds_new, | |
493 | unsigned fds_new_nb, | |
494 | MemoryRegionIoeventfd *fds_old, | |
495 | unsigned fds_old_nb) | |
496 | { | |
497 | unsigned iold, inew; | |
498 | ||
499 | /* Generate a symmetric difference of the old and new fd sets, adding | |
500 | * and deleting as necessary. | |
501 | */ | |
502 | ||
503 | iold = inew = 0; | |
504 | while (iold < fds_old_nb || inew < fds_new_nb) { | |
505 | if (iold < fds_old_nb | |
506 | && (inew == fds_new_nb | |
507 | || memory_region_ioeventfd_before(fds_old[iold], | |
508 | fds_new[inew]))) { | |
509 | as->ops->ioeventfd_del(as, &fds_old[iold]); | |
510 | ++iold; | |
511 | } else if (inew < fds_new_nb | |
512 | && (iold == fds_old_nb | |
513 | || memory_region_ioeventfd_before(fds_new[inew], | |
514 | fds_old[iold]))) { | |
515 | as->ops->ioeventfd_add(as, &fds_new[inew]); | |
516 | ++inew; | |
517 | } else { | |
518 | ++iold; | |
519 | ++inew; | |
520 | } | |
521 | } | |
522 | } | |
523 | ||
524 | static void address_space_update_ioeventfds(AddressSpace *as) | |
525 | { | |
526 | FlatRange *fr; | |
527 | unsigned ioeventfd_nb = 0; | |
528 | MemoryRegionIoeventfd *ioeventfds = NULL; | |
529 | AddrRange tmp; | |
530 | unsigned i; | |
531 | ||
532 | FOR_EACH_FLAT_RANGE(fr, &as->current_map) { | |
533 | for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { | |
534 | tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, | |
535 | fr->addr.start - fr->offset_in_region); | |
536 | if (addrrange_intersects(fr->addr, tmp)) { | |
537 | ++ioeventfd_nb; | |
538 | ioeventfds = qemu_realloc(ioeventfds, | |
539 | ioeventfd_nb * sizeof(*ioeventfds)); | |
540 | ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; | |
541 | ioeventfds[ioeventfd_nb-1].addr = tmp; | |
542 | } | |
543 | } | |
544 | } | |
545 | ||
546 | address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, | |
547 | as->ioeventfds, as->ioeventfd_nb); | |
548 | ||
549 | qemu_free(as->ioeventfds); | |
550 | as->ioeventfds = ioeventfds; | |
551 | as->ioeventfd_nb = ioeventfd_nb; | |
552 | } | |
553 | ||
b8af1afb AK |
554 | static void address_space_update_topology_pass(AddressSpace *as, |
555 | FlatView old_view, | |
556 | FlatView new_view, | |
557 | bool adding) | |
093bc2cd | 558 | { |
093bc2cd AK |
559 | unsigned iold, inew; |
560 | FlatRange *frold, *frnew; | |
093bc2cd AK |
561 | |
562 | /* Generate a symmetric difference of the old and new memory maps. | |
563 | * Kill ranges in the old map, and instantiate ranges in the new map. | |
564 | */ | |
565 | iold = inew = 0; | |
566 | while (iold < old_view.nr || inew < new_view.nr) { | |
567 | if (iold < old_view.nr) { | |
568 | frold = &old_view.ranges[iold]; | |
569 | } else { | |
570 | frold = NULL; | |
571 | } | |
572 | if (inew < new_view.nr) { | |
573 | frnew = &new_view.ranges[inew]; | |
574 | } else { | |
575 | frnew = NULL; | |
576 | } | |
577 | ||
578 | if (frold | |
579 | && (!frnew | |
580 | || frold->addr.start < frnew->addr.start | |
581 | || (frold->addr.start == frnew->addr.start | |
582 | && !flatrange_equal(frold, frnew)))) { | |
583 | /* In old, but (not in new, or in new but attributes changed). */ | |
584 | ||
b8af1afb AK |
585 | if (!adding) { |
586 | as->ops->range_del(as, frold); | |
587 | } | |
588 | ||
093bc2cd AK |
589 | ++iold; |
590 | } else if (frold && frnew && flatrange_equal(frold, frnew)) { | |
591 | /* In both (logging may have changed) */ | |
592 | ||
b8af1afb AK |
593 | if (adding) { |
594 | if (frold->dirty_log_mask && !frnew->dirty_log_mask) { | |
595 | as->ops->log_stop(as, frnew); | |
596 | } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) { | |
597 | as->ops->log_start(as, frnew); | |
598 | } | |
5a583347 AK |
599 | } |
600 | ||
093bc2cd AK |
601 | ++iold; |
602 | ++inew; | |
093bc2cd AK |
603 | } else { |
604 | /* In new */ | |
605 | ||
b8af1afb AK |
606 | if (adding) { |
607 | as->ops->range_add(as, frnew); | |
608 | } | |
609 | ||
093bc2cd AK |
610 | ++inew; |
611 | } | |
612 | } | |
b8af1afb AK |
613 | } |
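/* Note (added commentary): the caller below runs this function twice over
 * the same pair of views.  The first pass (adding == false) only deletes
 * ranges that disappear or change, and the second pass (adding == true)
 * only registers new ranges and adjusts dirty logging, so a range that
 * changes is unregistered before its replacement is installed.
 */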
614 | ||
615 | ||
616 | static void address_space_update_topology(AddressSpace *as) | |
617 | { | |
618 | FlatView old_view = as->current_map; | |
619 | FlatView new_view = generate_memory_topology(as->root); | |
620 | ||
621 | address_space_update_topology_pass(as, old_view, new_view, false); | |
622 | address_space_update_topology_pass(as, old_view, new_view, true); | |
623 | ||
cc31e6e7 | 624 | as->current_map = new_view; |
093bc2cd | 625 | flatview_destroy(&old_view); |
3e9d69e7 | 626 | address_space_update_ioeventfds(as); |
093bc2cd AK |
627 | } |
628 | ||
cc31e6e7 AK |
629 | static void memory_region_update_topology(void) |
630 | { | |
4ef4db86 AK |
631 | if (memory_region_transaction_depth) { |
632 | return; | |
633 | } | |
634 | ||
658b2224 AK |
635 | if (address_space_memory.root) { |
636 | address_space_update_topology(&address_space_memory); | |
637 | } | |
638 | if (address_space_io.root) { | |
639 | address_space_update_topology(&address_space_io); | |
640 | } | |
cc31e6e7 AK |
641 | } |
642 | ||
4ef4db86 AK |
643 | void memory_region_transaction_begin(void) |
644 | { | |
645 | ++memory_region_transaction_depth; | |
646 | } | |
647 | ||
648 | void memory_region_transaction_commit(void) | |
649 | { | |
650 | assert(memory_region_transaction_depth); | |
651 | --memory_region_transaction_depth; | |
652 | memory_region_update_topology(); | |
653 | } | |
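/* Usage sketch (illustrative, not part of the original commit): callers
 * making several related changes can bracket them so the flat view is
 * rebuilt only once, at commit time.  "parent" and "dev" are hypothetical:
 *
 *     memory_region_transaction_begin();
 *     memory_region_add_subregion(parent, 0x1000, &dev->mmio);
 *     memory_region_add_subregion(parent, 0x2000, &dev->ram);
 *     memory_region_transaction_commit();
 */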
654 | ||
093bc2cd AK |
655 | void memory_region_init(MemoryRegion *mr, |
656 | const char *name, | |
657 | uint64_t size) | |
658 | { | |
659 | mr->ops = NULL; | |
660 | mr->parent = NULL; | |
661 | mr->size = size; | |
662 | mr->addr = 0; | |
663 | mr->offset = 0; | |
14a3c10a | 664 | mr->terminates = false; |
093bc2cd AK |
665 | mr->priority = 0; |
666 | mr->may_overlap = false; | |
667 | mr->alias = NULL; | |
668 | QTAILQ_INIT(&mr->subregions); | |
669 | memset(&mr->subregions_link, 0, sizeof mr->subregions_link); | |
670 | QTAILQ_INIT(&mr->coalesced); | |
671 | mr->name = qemu_strdup(name); | |
5a583347 | 672 | mr->dirty_log_mask = 0; |
3e9d69e7 AK |
673 | mr->ioeventfd_nb = 0; |
674 | mr->ioeventfds = NULL; | |
093bc2cd AK |
675 | } |
676 | ||
677 | static bool memory_region_access_valid(MemoryRegion *mr, | |
678 | target_phys_addr_t addr, | |
679 | unsigned size) | |
680 | { | |
681 | if (!mr->ops->valid.unaligned && (addr & (size - 1))) { | |
682 | return false; | |
683 | } | |
684 | ||
685 | /* Treat a max_access_size of zero as "all sizes valid", for compatibility */ | |
686 | if (!mr->ops->valid.max_access_size) { | |
687 | return true; | |
688 | } | |
689 | ||
690 | if (size > mr->ops->valid.max_access_size | |
691 | || size < mr->ops->valid.min_access_size) { | |
692 | return false; | |
693 | } | |
694 | return true; | |
695 | } | |
696 | ||
697 | static uint32_t memory_region_read_thunk_n(void *_mr, | |
698 | target_phys_addr_t addr, | |
699 | unsigned size) | |
700 | { | |
701 | MemoryRegion *mr = _mr; | |
702 | unsigned access_size, access_size_min, access_size_max; | |
703 | uint64_t access_mask; | |
704 | uint32_t data = 0, tmp; | |
705 | unsigned i; | |
706 | ||
707 | if (!memory_region_access_valid(mr, addr, size)) { | |
708 | return -1U; /* FIXME: better signalling */ | |
709 | } | |
710 | ||
74901c3b AK |
711 | if (!mr->ops->read) { |
712 | return mr->ops->old_mmio.read[bitops_ffsl(size)](mr->opaque, addr); | |
713 | } | |
714 | ||
093bc2cd AK |
715 | /* FIXME: support unaligned access */ |
716 | ||
717 | access_size_min = mr->ops->impl.min_access_size; | |
718 | if (!access_size_min) { | |
719 | access_size_min = 1; | |
720 | } | |
721 | access_size_max = mr->ops->impl.max_access_size; | |
722 | if (!access_size_max) { | |
723 | access_size_max = 4; | |
724 | } | |
725 | access_size = MAX(MIN(size, access_size_max), access_size_min); | |
726 | access_mask = -1ULL >> (64 - access_size * 8); | |
727 | addr += mr->offset; | |
728 | for (i = 0; i < size; i += access_size) { | |
729 | /* FIXME: big-endian support */ | |
730 | tmp = mr->ops->read(mr->opaque, addr + i, access_size); | |
731 | data |= (tmp & access_mask) << (i * 8); | |
732 | } | |
733 | ||
734 | return data; | |
735 | } | |
736 | ||
737 | static void memory_region_write_thunk_n(void *_mr, | |
738 | target_phys_addr_t addr, | |
739 | unsigned size, | |
740 | uint64_t data) | |
741 | { | |
742 | MemoryRegion *mr = _mr; | |
743 | unsigned access_size, access_size_min, access_size_max; | |
744 | uint64_t access_mask; | |
745 | unsigned i; | |
746 | ||
747 | if (!memory_region_access_valid(mr, addr, size)) { | |
748 | return; /* FIXME: better signalling */ | |
749 | } | |
750 | ||
74901c3b AK |
751 | if (!mr->ops->write) { |
752 | mr->ops->old_mmio.write[bitops_ffsl(size)](mr->opaque, addr, data); | |
753 | return; | |
754 | } | |
755 | ||
093bc2cd AK |
756 | /* FIXME: support unaligned access */ |
757 | ||
758 | access_size_min = mr->ops->impl.min_access_size; | |
759 | if (!access_size_min) { | |
760 | access_size_min = 1; | |
761 | } | |
762 | access_size_max = mr->ops->impl.max_access_size; | |
763 | if (!access_size_max) { | |
764 | access_size_max = 4; | |
765 | } | |
766 | access_size = MAX(MIN(size, access_size_max), access_size_min); | |
767 | access_mask = -1ULL >> (64 - access_size * 8); | |
768 | addr += mr->offset; | |
769 | for (i = 0; i < size; i += access_size) { | |
770 | /* FIXME: big-endian support */ | |
771 | mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask, | |
772 | access_size); | |
773 | } | |
774 | } | |
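/* Worked example (added commentary): for a region whose ops set
 * impl.max_access_size = 2 and provide no old_mmio callbacks, a 4-byte
 * write of 0xAABBCCDD at offset 0 (with mr->offset == 0) is split into
 * two 2-byte callbacks:
 *
 *     mr->ops->write(mr->opaque, 0, 0xCCDD, 2);   // bits 0..15
 *     mr->ops->write(mr->opaque, 2, 0xAABB, 2);   // bits 16..31
 *
 * As the FIXME notes, the packing is little-endian only for now.
 */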
775 | ||
776 | static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr) | |
777 | { | |
778 | return memory_region_read_thunk_n(mr, addr, 1); | |
779 | } | |
780 | ||
781 | static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr) | |
782 | { | |
783 | return memory_region_read_thunk_n(mr, addr, 2); | |
784 | } | |
785 | ||
786 | static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr) | |
787 | { | |
788 | return memory_region_read_thunk_n(mr, addr, 4); | |
789 | } | |
790 | ||
791 | static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr, | |
792 | uint32_t data) | |
793 | { | |
794 | memory_region_write_thunk_n(mr, addr, 1, data); | |
795 | } | |
796 | ||
797 | static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr, | |
798 | uint32_t data) | |
799 | { | |
800 | memory_region_write_thunk_n(mr, addr, 2, data); | |
801 | } | |
802 | ||
803 | static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr, | |
804 | uint32_t data) | |
805 | { | |
806 | memory_region_write_thunk_n(mr, addr, 4, data); | |
807 | } | |
808 | ||
809 | static CPUReadMemoryFunc * const memory_region_read_thunk[] = { | |
810 | memory_region_read_thunk_b, | |
811 | memory_region_read_thunk_w, | |
812 | memory_region_read_thunk_l, | |
813 | }; | |
814 | ||
815 | static CPUWriteMemoryFunc * const memory_region_write_thunk[] = { | |
816 | memory_region_write_thunk_b, | |
817 | memory_region_write_thunk_w, | |
818 | memory_region_write_thunk_l, | |
819 | }; | |
820 | ||
16ef61c9 AK |
821 | static void memory_region_prepare_ram_addr(MemoryRegion *mr) |
822 | { | |
823 | if (mr->backend_registered) { | |
824 | return; | |
825 | } | |
826 | ||
827 | mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk, | |
828 | memory_region_write_thunk, | |
829 | mr, | |
830 | mr->ops->endianness); | |
831 | mr->backend_registered = true; | |
832 | } | |
833 | ||
093bc2cd AK |
834 | void memory_region_init_io(MemoryRegion *mr, |
835 | const MemoryRegionOps *ops, | |
836 | void *opaque, | |
837 | const char *name, | |
838 | uint64_t size) | |
839 | { | |
840 | memory_region_init(mr, name, size); | |
841 | mr->ops = ops; | |
842 | mr->opaque = opaque; | |
14a3c10a | 843 | mr->terminates = true; |
16ef61c9 | 844 | mr->backend_registered = false; |
093bc2cd AK |
845 | } |
846 | ||
847 | void memory_region_init_ram(MemoryRegion *mr, | |
848 | DeviceState *dev, | |
849 | const char *name, | |
850 | uint64_t size) | |
851 | { | |
852 | memory_region_init(mr, name, size); | |
14a3c10a | 853 | mr->terminates = true; |
093bc2cd | 854 | mr->ram_addr = qemu_ram_alloc(dev, name, size); |
16ef61c9 | 855 | mr->backend_registered = true; |
093bc2cd AK |
856 | } |
857 | ||
858 | void memory_region_init_ram_ptr(MemoryRegion *mr, | |
859 | DeviceState *dev, | |
860 | const char *name, | |
861 | uint64_t size, | |
862 | void *ptr) | |
863 | { | |
864 | memory_region_init(mr, name, size); | |
14a3c10a | 865 | mr->terminates = true; |
093bc2cd | 866 | mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr); |
16ef61c9 | 867 | mr->backend_registered = true; |
093bc2cd AK |
868 | } |
869 | ||
870 | void memory_region_init_alias(MemoryRegion *mr, | |
871 | const char *name, | |
872 | MemoryRegion *orig, | |
873 | target_phys_addr_t offset, | |
874 | uint64_t size) | |
875 | { | |
876 | memory_region_init(mr, name, size); | |
877 | mr->alias = orig; | |
878 | mr->alias_offset = offset; | |
879 | } | |
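/* Usage sketch (illustrative, hypothetical names): an alias makes part of
 * an existing region visible a second time at another guest-physical
 * address without duplicating its backing store, e.g. re-exposing the
 * low 1MB of a RAM region:
 *
 *     memory_region_init_alias(&low_mem_alias, "ram-low-alias", &ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(parent, 0xFFF00000, &low_mem_alias);
 */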
880 | ||
881 | void memory_region_destroy(MemoryRegion *mr) | |
882 | { | |
883 | assert(QTAILQ_EMPTY(&mr->subregions)); | |
884 | memory_region_clear_coalescing(mr); | |
885 | qemu_free((char *)mr->name); | |
3e9d69e7 | 886 | qemu_free(mr->ioeventfds); |
093bc2cd AK |
887 | } |
888 | ||
889 | uint64_t memory_region_size(MemoryRegion *mr) | |
890 | { | |
891 | return mr->size; | |
892 | } | |
893 | ||
894 | void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset) | |
895 | { | |
896 | mr->offset = offset; | |
897 | } | |
898 | ||
899 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) | |
900 | { | |
5a583347 AK |
901 | uint8_t mask = 1 << client; |
902 | ||
903 | mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); | |
904 | memory_region_update_topology(); | |
093bc2cd AK |
905 | } |
906 | ||
907 | bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr, | |
908 | unsigned client) | |
909 | { | |
14a3c10a | 910 | assert(mr->terminates); |
5a583347 | 911 | return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client); |
093bc2cd AK |
912 | } |
913 | ||
914 | void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr) | |
915 | { | |
14a3c10a | 916 | assert(mr->terminates); |
5a583347 | 917 | return cpu_physical_memory_set_dirty(mr->ram_addr + addr); |
093bc2cd AK |
918 | } |
919 | ||
920 | void memory_region_sync_dirty_bitmap(MemoryRegion *mr) | |
921 | { | |
5a583347 AK |
922 | FlatRange *fr; |
923 | ||
cc31e6e7 | 924 | FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) { |
5a583347 AK |
925 | if (fr->mr == mr) { |
926 | cpu_physical_sync_dirty_bitmap(fr->addr.start, | |
927 | fr->addr.start + fr->addr.size); | |
928 | } | |
929 | } | |
093bc2cd AK |
930 | } |
931 | ||
932 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly) | |
933 | { | |
934 | /* FIXME */ | |
935 | } | |
936 | ||
937 | void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr, | |
938 | target_phys_addr_t size, unsigned client) | |
939 | { | |
14a3c10a | 940 | assert(mr->terminates); |
5a583347 AK |
941 | cpu_physical_memory_reset_dirty(mr->ram_addr + addr, |
942 | mr->ram_addr + addr + size, | |
943 | 1 << client); | |
093bc2cd AK |
944 | } |
945 | ||
946 | void *memory_region_get_ram_ptr(MemoryRegion *mr) | |
947 | { | |
948 | if (mr->alias) { | |
949 | return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset; | |
950 | } | |
951 | ||
14a3c10a | 952 | assert(mr->terminates); |
093bc2cd AK |
953 | |
954 | return qemu_get_ram_ptr(mr->ram_addr); | |
955 | } | |
956 | ||
957 | static void memory_region_update_coalesced_range(MemoryRegion *mr) | |
958 | { | |
959 | FlatRange *fr; | |
960 | CoalescedMemoryRange *cmr; | |
961 | AddrRange tmp; | |
962 | ||
cc31e6e7 | 963 | FOR_EACH_FLAT_RANGE(fr, &address_space_memory.current_map) { |
093bc2cd AK |
964 | if (fr->mr == mr) { |
965 | qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size); | |
966 | QTAILQ_FOREACH(cmr, &mr->coalesced, link) { | |
967 | tmp = addrrange_shift(cmr->addr, | |
968 | fr->addr.start - fr->offset_in_region); | |
969 | if (!addrrange_intersects(tmp, fr->addr)) { | |
970 | continue; | |
971 | } | |
972 | tmp = addrrange_intersection(tmp, fr->addr); | |
973 | qemu_register_coalesced_mmio(tmp.start, tmp.size); | |
974 | } | |
975 | } | |
976 | } | |
977 | } | |
978 | ||
979 | void memory_region_set_coalescing(MemoryRegion *mr) | |
980 | { | |
981 | memory_region_clear_coalescing(mr); | |
982 | memory_region_add_coalescing(mr, 0, mr->size); | |
983 | } | |
984 | ||
985 | void memory_region_add_coalescing(MemoryRegion *mr, | |
986 | target_phys_addr_t offset, | |
987 | uint64_t size) | |
988 | { | |
989 | CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr)); | |
990 | ||
991 | cmr->addr = addrrange_make(offset, size); | |
992 | QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); | |
993 | memory_region_update_coalesced_range(mr); | |
994 | } | |
995 | ||
996 | void memory_region_clear_coalescing(MemoryRegion *mr) | |
997 | { | |
998 | CoalescedMemoryRange *cmr; | |
999 | ||
1000 | while (!QTAILQ_EMPTY(&mr->coalesced)) { | |
1001 | cmr = QTAILQ_FIRST(&mr->coalesced); | |
1002 | QTAILQ_REMOVE(&mr->coalesced, cmr, link); | |
1003 | qemu_free(cmr); | |
1004 | } | |
1005 | memory_region_update_coalesced_range(mr); | |
1006 | } | |
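/* Usage sketch (illustrative, "dev" is hypothetical): a device whose MMIO
 * writes need not be observed synchronously can have them batched by the
 * backend (qemu_register_coalesced_mmio(), i.e. KVM coalesced MMIO),
 * either for the whole region or for a sub-range:
 *
 *     memory_region_set_coalescing(&dev->mmio);              // whole region
 *     memory_region_add_coalescing(&dev->mmio, 0x40, 0x10);  // 16-byte window
 */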
1007 | ||
3e9d69e7 AK |
1008 | void memory_region_add_eventfd(MemoryRegion *mr, |
1009 | target_phys_addr_t addr, | |
1010 | unsigned size, | |
1011 | bool match_data, | |
1012 | uint64_t data, | |
1013 | int fd) | |
1014 | { | |
1015 | MemoryRegionIoeventfd mrfd = { | |
1016 | .addr.start = addr, | |
1017 | .addr.size = size, | |
1018 | .match_data = match_data, | |
1019 | .data = data, | |
1020 | .fd = fd, | |
1021 | }; | |
1022 | unsigned i; | |
1023 | ||
1024 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
1025 | if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { | |
1026 | break; | |
1027 | } | |
1028 | } | |
1029 | ++mr->ioeventfd_nb; | |
1030 | mr->ioeventfds = qemu_realloc(mr->ioeventfds, | |
1031 | sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); | |
1032 | memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], | |
1033 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); | |
1034 | mr->ioeventfds[i] = mrfd; | |
1035 | memory_region_update_topology(); | |
1036 | } | |
1037 | ||
1038 | void memory_region_del_eventfd(MemoryRegion *mr, | |
1039 | target_phys_addr_t addr, | |
1040 | unsigned size, | |
1041 | bool match_data, | |
1042 | uint64_t data, | |
1043 | int fd) | |
1044 | { | |
1045 | MemoryRegionIoeventfd mrfd = { | |
1046 | .addr.start = addr, | |
1047 | .addr.size = size, | |
1048 | .match_data = match_data, | |
1049 | .data = data, | |
1050 | .fd = fd, | |
1051 | }; | |
1052 | unsigned i; | |
1053 | ||
1054 | for (i = 0; i < mr->ioeventfd_nb; ++i) { | |
1055 | if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) { | |
1056 | break; | |
1057 | } | |
1058 | } | |
1059 | assert(i != mr->ioeventfd_nb); | |
1060 | memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], | |
1061 | sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); | |
1062 | --mr->ioeventfd_nb; | |
1063 | mr->ioeventfds = qemu_realloc(mr->ioeventfds, | |
1064 | sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); | |
1065 | memory_region_update_topology(); | |
1066 | } | |
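/* Usage sketch (illustrative): a doorbell register can be wired so that a
 * 4-byte guest write of a given value signals an eventfd directly in the
 * kernel.  "dev", "queue_idx" and "fd" are hypothetical; note that
 * as_memory_ioeventfd_add() above insists on match_data and a 4-byte size
 * for the memory address space (2 bytes for the I/O space):
 *
 *     memory_region_add_eventfd(&dev->mmio, 0x50, 4, true, queue_idx, fd);
 *     ...
 *     memory_region_del_eventfd(&dev->mmio, 0x50, 4, true, queue_idx, fd);
 */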
1067 | ||
093bc2cd AK |
1068 | static void memory_region_add_subregion_common(MemoryRegion *mr, |
1069 | target_phys_addr_t offset, | |
1070 | MemoryRegion *subregion) | |
1071 | { | |
1072 | MemoryRegion *other; | |
1073 | ||
1074 | assert(!subregion->parent); | |
1075 | subregion->parent = mr; | |
1076 | subregion->addr = offset; | |
1077 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
1078 | if (subregion->may_overlap || other->may_overlap) { | |
1079 | continue; | |
1080 | } | |
1081 | if (offset >= other->offset + other->size | |
1082 | || offset + subregion->size <= other->offset) { | |
1083 | continue; | |
1084 | } | |
1085 | printf("warning: subregion collision %llx/%llx vs %llx/%llx\n", | |
1086 | (unsigned long long)offset, | |
1087 | (unsigned long long)subregion->size, | |
1088 | (unsigned long long)other->offset, | |
1089 | (unsigned long long)other->size); | |
1090 | } | |
1091 | QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { | |
1092 | if (subregion->priority >= other->priority) { | |
1093 | QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); | |
1094 | goto done; | |
1095 | } | |
1096 | } | |
1097 | QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); | |
1098 | done: | |
1099 | memory_region_update_topology(); | |
1100 | } | |
1101 | ||
1102 | ||
1103 | void memory_region_add_subregion(MemoryRegion *mr, | |
1104 | target_phys_addr_t offset, | |
1105 | MemoryRegion *subregion) | |
1106 | { | |
1107 | subregion->may_overlap = false; | |
1108 | subregion->priority = 0; | |
1109 | memory_region_add_subregion_common(mr, offset, subregion); | |
1110 | } | |
1111 | ||
1112 | void memory_region_add_subregion_overlap(MemoryRegion *mr, | |
1113 | target_phys_addr_t offset, | |
1114 | MemoryRegion *subregion, | |
1115 | unsigned priority) | |
1116 | { | |
1117 | subregion->may_overlap = true; | |
1118 | subregion->priority = priority; | |
1119 | memory_region_add_subregion_common(mr, offset, subregion); | |
1120 | } | |
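/* Usage sketch (illustrative, hypothetical regions): overlaying a ROM on
 * top of an already-mapped RAM region; the non-zero priority makes the
 * ROM win where the two overlap, and may_overlap suppresses the collision
 * warning printed by memory_region_add_subregion_common():
 *
 *     memory_region_add_subregion(system, 0x00000000, &ram);
 *     memory_region_add_subregion_overlap(system, 0x000F0000, &rom, 1);
 */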
1121 | ||
1122 | void memory_region_del_subregion(MemoryRegion *mr, | |
1123 | MemoryRegion *subregion) | |
1124 | { | |
1125 | assert(subregion->parent == mr); | |
1126 | subregion->parent = NULL; | |
1127 | QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); | |
1128 | memory_region_update_topology(); | |
1129 | } | |
1c0ffa58 AK |
1130 | |
1131 | void set_system_memory_map(MemoryRegion *mr) | |
1132 | { | |
cc31e6e7 | 1133 | address_space_memory.root = mr; |
1c0ffa58 AK |
1134 | memory_region_update_topology(); |
1135 | } | |
658b2224 AK |
1136 | |
1137 | void set_system_io_map(MemoryRegion *mr) | |
1138 | { | |
1139 | address_space_io.root = mr; | |
1140 | memory_region_update_topology(); | |
1141 | } |