/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qapi/error.h"
#include "qom/object.h"

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};

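/*
 * Illustrative sketch, not part of the API: a minimal MemoryRegionOps for a
 * hypothetical device with a bank of 32-bit registers.  MyDevState,
 * my_dev_read, my_dev_write and my_dev_ops are made-up names;
 * DEVICE_NATIVE_ENDIAN is the usual enum device_endian value.
 *
 *   static uint64_t my_dev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];          // @addr is relative to the region
 *   }
 *
 *   static void my_dev_write(void *opaque, hwaddr addr,
 *                            uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps my_dev_ops = {
 *       .read = my_dev_read,
 *       .write = my_dev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,         // guest accesses of other sizes fault
 *       .valid.max_access_size = 4,
 *   };
 */
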
typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool skip_dump;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
};

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

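/*
 * Illustrative sketch, not part of the API: a minimal listener that is
 * notified about every section mapped into the filtered address space.
 * my_region_add and my_listener are made-up names; callbacks that are not
 * needed can simply be left NULL.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // react to a new guest-visible mapping of section->mr here
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, as);   // as: #AddressSpace to watch
 *   ...
 *   memory_listener_unregister(&my_listener);
 */
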
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;
    MemoryRegion *root;
    struct FlatView *current_map;
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

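/*
 * Illustrative sketch, not part of the API: wiring the hypothetical
 * my_dev_ops from the MemoryRegionOps example above into a 4 KiB MMIO
 * window owned by a made-up device instance s.
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
 *                         "my-dev-mmio", 0x1000);
 *   // the region is then typically mapped by the caller, e.g. with
 *   // memory_region_add_subregion() on a container region
 */
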
/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

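/*
 * Illustrative sketch, not part of the API: allocating 16 MiB of guest RAM
 * and mapping it at offset 0 of a hypothetical container region sysmem
 * (for example the region returned by get_system_memory()).
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *   memory_region_init_ram(ram, NULL, "my.ram", 16 * 1024 * 1024, &error_abort);
 *   memory_region_add_subregion(sysmem, 0, ram);
 */
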
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @opaque: passed to the callbacks of the @ops structure.
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the @target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_skip_dump: check whether a memory region should not be
 *                             dumped
 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_skip_dump(MemoryRegion *mr);

/**
 * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
 *                              region
 *
 * @mr: the memory region being queried
 */
void memory_region_set_skip_dump(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);
/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

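/*
 * Illustrative sketch, not part of the API: composing a small hierarchy in
 * which a device's MMIO window overlays part of RAM.  sysmem, ram and
 * s->iomem are made-up names for regions initialized elsewhere.
 *
 *   memory_region_add_subregion(sysmem, 0x00000000, ram);
 *   // priority 1 > 0, so the MMIO window hides the RAM underneath it
 *   memory_region_add_subregion_overlap(sysmem, 0x10000000, &s->iomem, 1);
 */
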
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the @offset argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 *                        translates into a #MemoryRegion within @container
 *
 * Answers whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 *                          into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 *                     MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

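/*
 * Illustrative sketch, not part of the API: looking up what backs one page of
 * a made-up root region sysmem.  Per the reference counting rules described
 * at memory_region_ref() above, a region obtained this way is assumed to need
 * an unref once the caller is done with it.
 *
 *   MemoryRegionSection section = memory_region_find(sysmem, 0x1000, 0x1000);
 *   if (section.mr) {
 *       // section.offset_within_region is relative to section.mr
 *       memory_region_unref(section.mr);
 *   }
 */
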
/**
 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 * @as: the address space that contains the memory being synchronized
 */
void address_space_sync_dirty_bitmap(AddressSpace *as);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

963 | /** |
964 | * memory_listener_register: register callbacks to be called when memory | |
965 | * sections are mapped or unmapped into an address | |
966 | * space | |
967 | * | |
968 | * @listener: an object containing the callbacks to be called | |
7376e582 | 969 | * @filter: if non-%NULL, only regions in this address space will be observed |
7664e80c | 970 | */ |
f6790af6 | 971 | void memory_listener_register(MemoryListener *listener, AddressSpace *filter); |
7664e80c AK |
972 | |
973 | /** | |
974 | * memory_listener_unregister: undo the effect of memory_listener_register() | |
975 | * | |
976 | * @listener: an object containing the callbacks to be removed | |
977 | */ | |
978 | void memory_listener_unregister(MemoryListener *listener); | |
979 | ||
980 | /** | |
981 | * memory_global_dirty_log_start: begin dirty logging for all regions | |
982 | */ | |
983 | void memory_global_dirty_log_start(void); | |
984 | ||
985 | /** | |
1a7e8cae | 986 | * memory_global_dirty_log_stop: end dirty logging for all regions |
7664e80c AK |
987 | */ |
988 | void memory_global_dirty_log_stop(void); | |
989 | ||
314e2987 BS |
990 | void mtree_info(fprintf_function mon_printf, void *f); |
991 | ||
9ad2bbc1 AK |
992 | /** |
993 | * address_space_init: initializes an address space | |
994 | * | |
995 | * @as: an uninitialized #AddressSpace | |
996 | * @root: a #MemoryRegion that routes addesses for the address space | |
7dca8043 AK |
997 | * @name: an address space name. The name is only used for debugging |
998 | * output. | |
9ad2bbc1 | 999 | */ |
7dca8043 | 1000 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); |
9ad2bbc1 | 1001 | |
/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);

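/*
 * Illustrative sketch, not part of the API: a device performing a 4-byte DMA
 * write into guest memory.  as and the address 0x1000 are made-up; as would
 * typically be the system memory address space or a per-device one.
 *
 *   uint32_t val = cpu_to_le32(0xdeadbeef);
 *   if (address_space_rw(as, 0x1000, (uint8_t *)&val, sizeof(val), true)) {
 *       // the access hit unassigned memory or an IOMMU fault
 *   }
 */
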
/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);

/* address_space_translate: translate an address range within an address space
 * into a MemoryRegion and an address range within that region
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
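
/*
 * Illustrative sketch, not part of the API: the usual map/access/unmap pattern
 * for zero-copy access to guest memory.  as and the address are made-up;
 * real callers must cope with a shortened @plen or a NULL return.
 *
 *   hwaddr plen = 4096;
 *   void *p = address_space_map(as, 0x40000000, &plen, true);
 *   if (p) {
 *       memset(p, 0, plen);                    // touch at most plen bytes
 *       address_space_unmap(as, p, plen, true, plen);
 *   }
 */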

#endif

#endif