/* Notify for address space range start <= addr <= end */
hwaddr start;
hwaddr end;
+ int iommu_idx;
QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC (1 << 0)
+
+/* RAM is mmap-ed with MAP_SHARED */
+#define RAM_SHARED (1 << 1)
+
+/* Only a portion of RAM (used_length) is actually used and migrated.
+ * The used_length can change across reboots.
+ */
+#define RAM_RESIZEABLE (1 << 2)
+
+/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
+ * zero the page and wake waiting processes.
+ * (Set during postcopy)
+ */
+#define RAM_UF_ZEROPAGE (1 << 3)
+
+/* RAM can be migrated */
+#define RAM_MIGRATABLE (1 << 4)
+
+/* RAM is a persistent kind of memory */
+#define RAM_PMEM (1 << 5)
+
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
IOMMUNotifierFlag flags,
- hwaddr start, hwaddr end)
+ hwaddr start, hwaddr end,
+ int iommu_idx)
{
n->notify = fn;
n->notifier_flags = flags;
n->start = start;
n->end = end;
+ n->iommu_idx = iommu_idx;
}
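+
+/* Illustrative sketch (not part of this patch): a user that wants MAP and
+ * UNMAP events for the whole range of a region would set up and register a
+ * notifier roughly as follows, where my_notify is a hypothetical
+ * IOMMUNotify callback:
+ *
+ *     IOMMUNotifier n;
+ *     int idx = memory_region_iommu_attrs_to_index(iommu_mr,
+ *                                                  MEMTXATTRS_UNSPECIFIED);
+ *
+ *     iommu_notifier_init(&n, my_notify, IOMMU_NOTIFIER_ALL,
+ *                         0, HWADDR_MAX, idx);
+ *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
+ */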
/*
uint64_t data,
unsigned size,
MemTxAttrs attrs);
- /* Instruction execution pre-callback:
- * @addr is the address of the access relative to the @mr.
- * @size is the size of the area returned by the callback.
- * @offset is the location of the pointer inside @mr.
- *
- * Returns a pointer to a location which contains guest code.
- */
- void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
- unsigned *offset);
enum device_endian endianness;
/* Guest-visible constraints: */
* as a machine check exception).
*/
bool (*accepts)(void *opaque, hwaddr addr,
- unsigned size, bool is_write);
+ unsigned size, bool is_write,
+ MemTxAttrs attrs);
} valid;
/* Internal implementation constraints: */
struct {
*/
bool unaligned;
} impl;
-
- /* If .read and .write are not present, old_mmio may be used for
- * backwards compatibility with old mmio registration
- */
- const MemoryRegionMmio old_mmio;
};
enum IOMMUMemoryRegionAttr {
IOMMU_ATTR_SPAPR_TCE_FD
};
+/**
+ * IOMMUMemoryRegionClass:
+ *
+ * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
+ * and provide an implementation of at least the @translate method here
+ * to handle requests to the memory region. Other methods are optional.
+ *
+ * The IOMMU implementation must use the IOMMU notifier infrastructure
+ * to report whenever mappings are changed, by calling
+ * memory_region_notify_iommu() (or, if necessary, by calling
+ * memory_region_notify_one() for each registered notifier).
+ *
+ * Conceptually an IOMMU provides a mapping from input address
+ * to an output TLB entry. If the IOMMU is aware of memory transaction
+ * attributes and the output TLB entry depends on the transaction
+ * attributes, we represent this using IOMMU indexes. Each index
+ * selects a particular translation table that the IOMMU has:
+ * @attrs_to_index returns the IOMMU index for a set of transaction attributes
+ * @translate takes an input address and an IOMMU index
+ * and the mapping returned can only depend on the input address and the
+ * IOMMU index.
+ *
+ * Most IOMMUs don't care about the transaction attributes and support
+ * only a single IOMMU index. A more complex IOMMU might have one index
+ * for secure transactions and one for non-secure transactions.
+ */
typedef struct IOMMUMemoryRegionClass {
/* private */
struct DeviceClass parent_class;
/*
- * Return a TLB entry that contains a given address. Flag should
- * be the access permission of this translation operation. We can
- * set flag to IOMMU_NONE to mean that we don't need any
- * read/write permission checks, like, when for region replay.
+ * Return a TLB entry that contains a given address.
+ *
+ * The IOMMUAccessFlags indicated via @flag are optional and may
+ * be specified as IOMMU_NONE to indicate that the caller needs
+ * the full translation information for both reads and writes. If
+ * the access flags are specified then the IOMMU implementation
+ * may use this as an optimization, to stop doing a page table
+ * walk as soon as it knows that the requested permissions are not
+ * allowed. If IOMMU_NONE is passed then the IOMMU must do the
+ * full page table walk and report the permissions in the returned
+ * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
+ * return different mappings for reads and writes.)
+ *
+ * The returned information remains valid while the caller is
+ * holding the big QEMU lock or is inside an RCU critical section;
+ * if the caller wishes to cache the mapping beyond that it must
+ * register an IOMMU notifier so it can invalidate its cached
+ * information when the IOMMU mapping changes.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @addr: address to be translated within the memory region
+ * @flag: requested access permissions
+ * @iommu_idx: IOMMU index for the translation
*/
IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
- IOMMUAccessFlags flag);
- /* Returns minimum supported page size */
+ IOMMUAccessFlags flag, int iommu_idx);
+ /* Returns the minimum supported page size in bytes.
+ * If this method is not provided then the minimum is assumed to
+ * be TARGET_PAGE_SIZE.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
- /* Called when IOMMU Notifier flag changed */
+ /* Called when IOMMU Notifier flags change (i.e. when the set of
+ * events which IOMMU users are requesting notification for changes).
+ * Optional method -- need not be provided if the IOMMU does not
+ * need to know exactly which events must be notified.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @old_flags: events which previously needed to be notified
+ * @new_flags: events which now need to be notified
+ */
void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
IOMMUNotifierFlag old_flags,
IOMMUNotifierFlag new_flags);
- /* Set this up to provide customized IOMMU replay function */
+ /* Called to handle memory_region_iommu_replay().
+ *
+ * The default implementation of memory_region_iommu_replay() is to
+ * call the IOMMU translate method for every page in the address space
+ * with flag == IOMMU_NONE and then call the notifier if translate
+ * returns a valid mapping. If this method is implemented then it
+ * overrides the default behaviour, and must provide the full semantics
+ * of memory_region_iommu_replay(), by calling @notifier for every
+ * translation present in the IOMMU.
+ *
+ * Optional method -- an IOMMU only needs to provide this method
+ * if the default is inefficient or produces undesirable side effects.
+ *
+ * Note: this is not related to record-and-replay functionality.
+ */
void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
- /* Get IOMMU misc attributes */
- int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr,
+ /* Get IOMMU misc attributes. This is an optional method that
+ * can be used to allow users of the IOMMU to get implementation-specific
+ * information. The IOMMU implements this method to handle calls
+ * by IOMMU users to memory_region_iommu_get_attr() by filling in
+ * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
+ * the IOMMU supports. If the method is unimplemented then
+ * memory_region_iommu_get_attr() will always return -EINVAL.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attr: attribute being queried
+ * @data: memory to fill in with the attribute data
+ *
+ * Returns 0 on success, or a negative errno; in particular
+ * returns -EINVAL for unrecognized or unimplemented attribute types.
+ */
+ int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
void *data);
+
+ /* Return the IOMMU index to use for a given set of transaction attributes.
+ *
+ * Optional method: if an IOMMU only supports a single IOMMU index then
+ * the default implementation of memory_region_iommu_attrs_to_index()
+ * will return 0.
+ *
+ * The indexes supported by an IOMMU must be contiguous, starting at 0.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ * @attrs: memory transaction attributes
+ */
+ int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
+
+ /* Return the number of IOMMU indexes this IOMMU supports.
+ *
+ * Optional method: if this method is not provided, then
+ * memory_region_iommu_num_indexes() will return 1, indicating that
+ * only a single IOMMU index is supported.
+ *
+ * @iommu: the IOMMUMemoryRegion
+ */
+ int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
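+
+/* Illustrative sketch (not part of this patch): an IOMMU with one
+ * translation table for secure and one for non-secure transactions might
+ * implement the index methods along these lines (the my_iommu_* names are
+ * hypothetical):
+ *
+ *     static int my_iommu_attrs_to_index(IOMMUMemoryRegion *iommu,
+ *                                        MemTxAttrs attrs)
+ *     {
+ *         return attrs.secure ? 1 : 0;
+ *     }
+ *
+ *     static int my_iommu_num_indexes(IOMMUMemoryRegion *iommu)
+ *     {
+ *         return 2;
+ *     }
+ */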
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
uint64_t length,
void *host),
Error **errp);
-#ifdef __linux__
+#ifdef CONFIG_POSIX
+
/**
* memory_region_init_ram_from_file: Initialize RAM memory region with a
* mmap-ed backend.
* @size: size of the region.
* @align: alignment of the region base address; if 0, the default alignment
* (getpagesize()) will be used.
- * @share: %true if memory must be mmaped with the MAP_SHARED flag
+ * @ram_flags: Memory region features:
+ * - RAM_SHARED: memory must be mmap-ed with the MAP_SHARED flag
+ * - RAM_PMEM: the memory is persistent memory
+ * Other bits are currently ignored.
* @path: the path in which to allocate the RAM.
* @errp: pointer to Error*, to store an error if it happens.
*
const char *name,
uint64_t size,
uint64_t align,
- bool share,
+ uint32_t ram_flags,
const char *path,
Error **errp);
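+
+/* For instance, a persistent-memory backend would typically pass
+ * RAM_SHARED | RAM_PMEM as @ram_flags (sketch only; the name and path
+ * below are hypothetical):
+ *
+ *     memory_region_init_ram_from_file(mr, owner, "pmem0", size, 0,
+ *                                      RAM_SHARED | RAM_PMEM,
+ *                                      "/mnt/pmem/guest", &err);
+ */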
uint64_t size,
Error **errp);
-/**
- * memory_region_init_reservation: Initialize a memory region that reserves
- * I/O space.
- *
- * A reservation region primariy serves debugging purposes. It claims I/O
- * space that is not supposed to be handled by QEMU itself. Any access via
- * the memory API will cause an abort().
- * This function is deprecated. Use memory_region_init_io() with NULL
- * callbacks instead.
- *
- * @mr: the #MemoryRegion to be initialized
- * @owner: the object that tracks the region's reference count
- * @name: used for debugging; not visible to the user or ABI
- * @size: size of the region.
- */
-static inline void memory_region_init_reservation(MemoryRegion *mr,
- Object *owner,
- const char *name,
- uint64_t size)
-{
- memory_region_init_io(mr, owner, NULL, mr, name, size);
-}
-
/**
* memory_region_init_iommu: Initialize a memory region of a custom type
* that translates addresses
* An IOMMU region translates addresses and forwards accesses to a target
* memory region.
*
+ * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
+ * @_iommu_mr should be a pointer to enough memory for an instance of
+ * that subclass, @instance_size is the size of that subclass, and
+ * @mrtypename is its name. This function will initialize @_iommu_mr as an
+ * instance of the subclass, and its methods will then be called to handle
+ * accesses to the memory region. See the documentation of
+ * #IOMMUMemoryRegionClass for further details.
+ *
* @_iommu_mr: the #IOMMUMemoryRegion to be initialized
* @instance_size: the IOMMUMemoryRegion subclass instance size
* @mrtypename: the type name of the #IOMMUMemoryRegion
* should be notified with an UNMAP followed by a MAP.
*
* @iommu_mr: the memory region that was changed
+ * @iommu_idx: the IOMMU index for the translation table which has changed
* @entry: the new entry in the IOMMU translation table. The entry
* replaces all old entries for the same virtual I/O address range.
* Deleted entries have .@perm == 0.
*/
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
+ int iommu_idx,
IOMMUTLBEntry entry);
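+
+/* Illustrative sketch (not part of this patch): an IOMMU whose change
+ * affects the mappings for every translation table would notify each
+ * index in turn:
+ *
+ *     int i, n = memory_region_iommu_num_indexes(iommu_mr);
+ *
+ *     for (i = 0; i < n; i++) {
+ *         memory_region_notify_iommu(iommu_mr, i, entry);
+ *     }
+ */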
/**
* a notifier with the minimum page granularity returned by
* mr->iommu_ops->get_page_size().
*
+ * Note: this is not related to record-and-replay functionality.
+ *
* @iommu_mr: the memory region to observe
* @n: the notifier to which to replay iommu mappings
*/
* memory_region_iommu_replay_all: replay existing IOMMU translations
* to all the notifiers registered.
*
+ * Note: this is not related to record-and-replay functionality.
+ *
* @iommu_mr: the memory region to observe
*/
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
* memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
* defined on the IOMMU.
*
- * Returns 0 if succeded, error code otherwise.
+ * Returns 0 on success, or a negative errno otherwise. In particular,
+ * -EINVAL indicates that the IOMMU does not support the requested
+ * attribute.
*
* @iommu_mr: the memory region
* @attr: the requested attribute
enum IOMMUMemoryRegionAttr attr,
void *data);
+/**
+ * memory_region_iommu_attrs_to_index: return the IOMMU index to
+ * use for translations with the given memory transaction attributes.
+ *
+ * @iommu_mr: the memory region
+ * @attrs: the memory transaction attributes
+ */
+int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
+ MemTxAttrs attrs);
+
+/**
+ * memory_region_iommu_num_indexes: return the total number of IOMMU
+ * indexes that this IOMMU supports.
+ *
+ * @iommu_mr: the memory region
+ */
+int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
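+
+/* Illustrative sketch (not part of this patch): a user that must track
+ * every translation table can register one notifier per IOMMU index,
+ * where notifiers[] and my_notify are hypothetical:
+ *
+ *     int i, n = memory_region_iommu_num_indexes(iommu_mr);
+ *
+ *     for (i = 0; i < n; i++) {
+ *         iommu_notifier_init(&notifiers[i], my_notify, IOMMU_NOTIFIER_ALL,
+ *                             0, HWADDR_MAX, i);
+ *         memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
+ *                                               &notifiers[i]);
+ *     }
+ */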
+
/**
* memory_region_name: get a memory region's name
*
void memory_global_dirty_log_stop(void);
void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
- bool dispatch_tree);
-
-/**
- * memory_region_request_mmio_ptr: request a pointer to an mmio
- * MemoryRegion. If it is possible map a RAM MemoryRegion with this pointer.
- * When the device wants to invalidate the pointer it will call
- * memory_region_invalidate_mmio_ptr.
- *
- * @mr: #MemoryRegion to check
- * @addr: address within that region
- *
- * Returns true on success, false otherwise.
- */
-bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
-
-/**
- * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
- * previously requested.
- * In the end that means that if something wants to execute from this area it
- * will need to request the pointer again.
- *
- * @mr: #MemoryRegion associated to the pointer.
- * @offset: offset within the memory region
- * @size: size of that area.
- */
-void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
- unsigned size);
+ bool dispatch_tree, bool owner);
/**
* memory_region_dispatch_read: perform a read directly to the specified
* @result: location to write the success/failure of the transaction;
* if NULL, this information is discarded
*/
-uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-
-uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
-uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
-uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
-void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
-void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
-void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst.inc.h"
+
+#define SUFFIX
+#define ARG1 as
+#define ARG1_DECL AddressSpace *as
+#include "exec/memory_ldst_phys.inc.h"
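+
+/* The two includes above expand to the same load/store families that were
+ * previously declared by hand here, e.g. (shape only, not verbatim):
+ *
+ *     uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
+ *                                   MemTxAttrs attrs, MemTxResult *result);
+ *     uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
+ */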
struct MemoryRegionCache {
+ void *ptr;
hwaddr xlat;
hwaddr len;
- AddressSpace *as;
+ FlatView *fv;
+ MemoryRegionSection mrs;
+ bool is_write;
};
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
+
+/* address_space_ld*_cached: load from a cached #MemoryRegion
+ * address_space_st*_cached: store into a cached #MemoryRegion
+ *
+ * These functions perform a load or store of the byte, word,
+ * longword or quad to the specified address. The address is
+ * a physical address in the AddressSpace, but it must lie within
+ * a #MemoryRegion that was mapped with address_space_cache_init.
+ *
+ * The _le suffixed functions treat the data as little endian;
+ * _be indicates big endian; no suffix indicates "same endianness
+ * as guest CPU".
+ *
+ * The "guest CPU endianness" accessors are deprecated for use outside
+ * target-* code; devices should be CPU-agnostic and use either the LE
+ * or the BE accessors.
+ *
+ * @cache: previously initialized #MemoryRegionCache to be accessed
+ * @addr: address within the address space
+ * @val: data value, for stores
+ * @attrs: memory transaction attributes
+ * @result: location to write the success/failure of the transaction;
+ * if NULL, this information is discarded
+ */
+
+#define SUFFIX _cached_slow
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst.inc.h"
+
+/* Inline fast path for direct RAM access. */
+static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
+ hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ return ldub_p(cache->ptr + addr);
+ } else {
+ return address_space_ldub_cached_slow(cache, addr, attrs, result);
+ }
+}
+
+static inline void address_space_stb_cached(MemoryRegionCache *cache,
+ hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
+{
+ assert(addr < cache->len);
+ if (likely(cache->ptr)) {
+ stb_p(cache->ptr + addr, val);
+ } else {
+ address_space_stb_cached_slow(cache, addr, val, attrs, result);
+ }
+}
+
+#define ENDIANNESS _le
+#include "exec/memory_ldst_cached.inc.h"
+
+#define ENDIANNESS _be
+#include "exec/memory_ldst_cached.inc.h"
+
+#define SUFFIX _cached
+#define ARG1 cache
+#define ARG1_DECL MemoryRegionCache *cache
+#include "exec/memory_ldst_phys.inc.h"
/* address_space_cache_init: prepare for repeated access to a physical
* memory region
*/
void address_space_cache_destroy(MemoryRegionCache *cache);
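+
+/* Typical use of a MemoryRegionCache (illustrative sketch): initialize it
+ * over a physical range, access it repeatedly, then destroy it:
+ *
+ *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
+ *
+ *     if (address_space_cache_init(&cache, as, base, len, false) >= 0) {
+ *         uint32_t v = address_space_ldl_le_cached(&cache, 0,
+ *                                                  MEMTXATTRS_UNSPECIFIED,
+ *                                                  NULL);
+ *         ...
+ *         address_space_cache_destroy(&cache);
+ *     }
+ */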
-/* address_space_ld*_cached: load from a cached #MemoryRegion
- * address_space_st*_cached: store into a cached #MemoryRegion
- *
- * These functions perform a load or store of the byte, word,
- * longword or quad to the specified address. The address is
- * a physical address in the AddressSpace, but it must lie within
- * a #MemoryRegion that was mapped with address_space_cache_init.
- *
- * The _le suffixed functions treat the data as little endian;
- * _be indicates big endian; no suffix indicates "same endianness
- * as guest CPU".
- *
- * The "guest CPU endianness" accessors are deprecated for use outside
- * target-* code; devices should be CPU-agnostic and use either the LE
- * or the BE accessors.
- *
- * @cache: previously initialized #MemoryRegionCache to be accessed
- * @addr: address within the address space
- * @val: data value, for stores
- * @attrs: memory transaction attributes
- * @result: location to write the success/failure of the transaction;
- * if NULL, this information is discarded
- */
-uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
- MemTxAttrs attrs, MemTxResult *result);
-
-uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
-void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
-void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
-void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
/* address_space_get_iotlb_entry: translate an address into an IOTLB
* entry. Should be called from an RCU critical section.
*/
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
- bool is_write);
+ bool is_write, MemTxAttrs attrs);
/* address_space_translate: translate an address range into an address space
* into a MemoryRegion and an address range into that section. Should be
* #MemoryRegion.
* @len: pointer to length
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
MemoryRegion *flatview_translate(FlatView *fv,
hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write);
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs);
static inline MemoryRegion *address_space_translate(AddressSpace *as,
hwaddr addr, hwaddr *xlat,
- hwaddr *len, bool is_write)
+ hwaddr *len, bool is_write,
+ MemTxAttrs attrs)
{
return flatview_translate(address_space_to_flatview(as),
- addr, xlat, len, is_write);
+ addr, xlat, len, is_write, attrs);
}
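+
+/* Illustrative sketch (not part of this patch): callers must hold the RCU
+ * read lock (or the big QEMU lock) around the translation and any use of
+ * the returned MemoryRegion:
+ *
+ *     hwaddr xlat, l = len;
+ *     MemoryRegion *mr;
+ *
+ *     rcu_read_lock();
+ *     mr = address_space_translate(as, addr, &xlat, &l, false,
+ *                                  MEMTXATTRS_UNSPECIFIED);
+ *     ... use mr, xlat and l while still in the critical section ...
+ *     rcu_read_unlock();
+ */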
/* address_space_access_valid: check for validity of accessing an address
* @addr: address within that address space
* @len: length of the area to be checked
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
-bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
+ bool is_write, MemTxAttrs attrs);
/* address_space_map: map a physical memory region into a host virtual address
*
* @addr: address within that address space
* @plen: pointer to length of buffer; updated on return
* @is_write: indicates the transfer direction
+ * @attrs: memory attributes
*/
void *address_space_map(AddressSpace *as, hwaddr addr,
- hwaddr *plen, bool is_write);
+ hwaddr *plen, bool is_write, MemTxAttrs attrs);
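+
+/* Illustrative sketch (not part of this patch): a successful map must be
+ * paired with address_space_unmap():
+ *
+ *     hwaddr plen = len;
+ *     void *p = address_space_map(as, addr, &plen, true,
+ *                                 MEMTXATTRS_UNSPECIFIED);
+ *
+ *     if (p) {
+ *         ... write at most plen bytes at p ...
+ *         address_space_unmap(as, p, plen, true, plen);
+ *     }
+ */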
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
*
MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
+/* Internal functions, part of the implementation of address_space_read_cached
+ * and address_space_write_cached. */
+void address_space_read_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, void *buf, int len);
+void address_space_write_cached_slow(MemoryRegionCache *cache,
+ hwaddr addr, const void *buf, int len);
+
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
if (is_write) {
rcu_read_lock();
fv = address_space_to_flatview(as);
l = len;
- mr = flatview_translate(fv, addr, &addr1, &l, false);
+ mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
if (len == l && memory_access_is_direct(mr, false)) {
ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(buf, ptr, len);
void *buf, int len)
{
assert(addr < cache->len && len <= cache->len - addr);
- address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+ if (likely(cache->ptr)) {
+ memcpy(buf, cache->ptr + addr, len);
+ } else {
+ address_space_read_cached_slow(cache, addr, buf, len);
+ }
}
/**
void *buf, int len)
{
assert(addr < cache->len && len <= cache->len - addr);
- address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
+ if (likely(cache->ptr)) {
+ memcpy(cache->ptr + addr, buf, len);
+ } else {
+ address_space_write_cached_slow(cache, addr, buf, len);
+ }
}
#endif