#define mapcache_unlock() ((void)0)
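/* Note: mapcache_lock()/mapcache_unlock() compile to nothing here; at this
 * stage the cache is presumably serialized by QEMU's global mutex, so the
 * macros read as placeholders for finer-grained locking later. */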
typedef struct MapCacheEntry {
-    target_phys_addr_t paddr_index;
+    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
-    target_phys_addr_t size;
+    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;
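/* For context (not part of this patch): hwaddr is the fixed-width guest
 * physical address type that replaces target_phys_addr_t. A sketch of its
 * definition, assuming the usual 64-bit layout of QEMU's hwaddr header: */
typedef uint64_t hwaddr;
#define TARGET_FMT_plx "%016" PRIx64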
typedef struct MapCacheRev {
    uint8_t *vaddr_req;
-    target_phys_addr_t paddr_index;
-    target_phys_addr_t size;
+    hwaddr paddr_index;
+    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
} MapCacheRev;
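/* Illustration only: how a locked mapping lands on the reverse list so
 * xen_invalidate_map_cache_entry() can later find it by virtual address.
 * mapcache_record_locked() is a hypothetical helper; the patched code does
 * this inline in xen_map_cache(), using the file-scope mapcache pointer. */
static void mapcache_record_locked(uint8_t *vaddr, hwaddr paddr_index,
                                   hwaddr size)
{
    MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));

    reventry->vaddr_req = vaddr;
    reventry->paddr_index = paddr_index;
    reventry->size = size;
    QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
}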
typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
-    target_phys_addr_t last_address_index;
+    hwaddr last_address_index;
    uint8_t *last_address_vaddr;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;
} MapCache;
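/* Sketch of the bucket arithmetic behind paddr_index, assuming the
 * MCACHE_BUCKET_SHIFT / MCACHE_BUCKET_SIZE split used by xen-mapcache.c:
 * a guest physical address divides into a bucket index (the hash key)
 * and an offset inside that bucket's mapping. */
hwaddr address_index  = phys_addr >> MCACHE_BUCKET_SHIFT;
hwaddr address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);
MapCacheEntry *bucket = &mapcache->entry[address_index % mapcache->nr_buckets];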
static void xen_remap_bucket(MapCacheEntry *entry,
-                             target_phys_addr_t size,
-                             target_phys_addr_t address_index)
+                             hwaddr size,
+                             hwaddr address_index)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
-    target_phys_addr_t nb_pfn = size >> XC_PAGE_SHIFT;
+    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);
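    /* Sketch of the elided remap step, assuming the classic libxenctrl
     * bulk-map API (old-mapping teardown and error checks trimmed): */
    pfns = g_malloc0(nb_pfn * sizeof(xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof(int));
    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) + i;
    }
    vaddr_base = xc_map_foreign_bulk(xen_xc, xen_domid,
                                     PROT_READ | PROT_WRITE,
                                     pfns, err, nb_pfn);
    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    g_free(pfns);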
    g_free(err);
}
-uint8_t *xen_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size,
+uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock)
{
    MapCacheEntry *entry, *pentry = NULL;
-    target_phys_addr_t address_index;
-    target_phys_addr_t address_offset;
-    target_phys_addr_t __size = size;
+    hwaddr address_index;
+    hwaddr address_offset;
+    hwaddr __size = size;
    bool translated = false;

tryagain:
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
-    target_phys_addr_t paddr_index;
-    target_phys_addr_t size;
+    hwaddr paddr_index;
+    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
void xen_invalidate_map_cache_entry(uint8_t *vaddr)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
-    target_phys_addr_t paddr_index;
-    target_phys_addr_t size;
+    hwaddr paddr_index;
+    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {