hwaddr addr,
bool resolve_subpage)
{
- MemoryRegionSection *section = atomic_read(&d->mru_section);
+ MemoryRegionSection *section = qatomic_read(&d->mru_section);
subpage_t *subpage;
if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
!section_covers_addr(section, addr)) {
section = phys_page_find(d, addr);
- atomic_set(&d->mru_section, section);
+ qatomic_set(&d->mru_section, section);
}
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
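
The mru_section lookup above is the recurring qatomic pattern for a racy single-pointer cache: readers load the hint with qatomic_read() and fall back to the slow path, and whoever takes the slow path republishes the result with qatomic_set(). The pointed-to section is kept alive by RCU, so no stronger ordering is needed. A minimal sketch of the same pattern, not part of the patch; Entry, cache_mru, slow_lookup() and entry_covers() are invented names, and the qatomic calls assume qemu/atomic.h from a QEMU build:

typedef struct Entry Entry;
static Entry *cache_mru;

Entry *slow_lookup(hwaddr addr);        /* assumed helper */
bool entry_covers(Entry *e, hwaddr a);  /* assumed helper */

static Entry *lookup(hwaddr addr)
{
    Entry *e = qatomic_read(&cache_mru);  /* racy read is fine under RCU */
    if (!e || !entry_covers(e, addr)) {
        e = slow_lookup(addr);            /* authoritative path */
        qatomic_set(&cache_mru, e);       /* republish the hint */
    }
    return e;
}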
IOMMUMemoryRegionClass *imrc;
IOMMUTLBEntry iotlb;
int iommu_idx;
- AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+ AddressSpaceDispatch *d =
+ qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
for (;;) {
section = address_space_translate_internal(d, addr, &addr, plen, false);
{
RAMBlock *block;
- block = atomic_rcu_read(&ram_list.mru_block);
+ block = qatomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
return block;
}
* call_rcu(reclaim_ramblock, xxx);
* rcu_read_unlock()
*
- * atomic_rcu_set is not needed here. The block was already published
+ * qatomic_rcu_set is not needed here. The block was already published
* when it was placed into the list. Here we're just making an extra
* copy of the pointer.
*/
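
The distinction this comment draws, between first publication (which needs the release ordering of qatomic_rcu_set()) and a later copy of an already-published pointer (which does not), can be condensed into a small sketch. Node, head, mru_hint, publish() and remember() are hypothetical names, not part of the patch:

typedef struct Node {
    int payload;
    struct Node *next;
} Node;

static Node *head;
static Node *mru_hint;

static void publish(Node *n)
{
    n->next = head;
    /* First publication: the barrier in qatomic_rcu_set() orders the
     * payload stores before the pointer becomes visible to readers. */
    qatomic_rcu_set(&head, n);
}

static void remember(Node *n)
{
    /* n is already reachable via head, so its payload is already
     * visible; a plain copy of the pointer needs no extra barrier. */
    mru_hint = n;
}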
page = start_page;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
ramblock = qemu_get_ram_block(start);
/* Range sanity check on the ramblock */
assert(start >= ramblock->offset &&
dest = 0;
WITH_RCU_READ_LOCK_GUARD() {
- blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
while (page < end) {
unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
DirtyMemoryBlocks *new_blocks;
int j;
- old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+ old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
new_blocks = g_malloc(sizeof(*new_blocks) +
sizeof(new_blocks->blocks[0]) * new_num_blocks);
new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
}
- atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+ qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
if (old_blocks) {
g_free_rcu(old_blocks, rcu);
}
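
dirty_memory_extend() above is the classic RCU copy-update sequence: read the old array with qatomic_rcu_read(), build a larger replacement off to the side, publish it with qatomic_rcu_set(), and defer freeing the old copy past the grace period with g_free_rcu(). Reduced to its skeleton, with Table, table and table_grow() as invented names (the embedded rcu_head mirrors the one in DirtyMemoryBlocks):

typedef struct Table {
    struct rcu_head rcu;   /* needed by g_free_rcu() */
    unsigned long n;
    void *slot[];
} Table;

static Table *table;

static void table_grow(unsigned long new_n)
{
    Table *old = qatomic_rcu_read(&table);
    Table *new = g_malloc0(sizeof(*new) + new_n * sizeof(new->slot[0]));

    new->n = new_n;
    if (old) {
        memcpy(new->slot, old->slot, old->n * sizeof(old->slot[0]));
    }
    qatomic_rcu_set(&table, new);  /* readers see old or new, never a mix */
    if (old) {
        g_free_rcu(old, rcu);      /* freed only after all readers move on */
    }
}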
RCU_READ_LOCK_GUARD();
- block = atomic_rcu_read(&ram_list.mru_block);
+ block = qatomic_rcu_read(&ram_list.mru_block);
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
{
int asidx = cpu_asidx_from_attrs(cpu, attrs);
CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
- AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
+ AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
MemoryRegionSection *sections = d->map.sections;
return &sections[index & ~TARGET_PAGE_MASK];

* may have split the RCU critical section.
*/
d = address_space_to_dispatch(cpuas->as);
- atomic_rcu_set(&cpuas->memory_dispatch, d);
+ qatomic_rcu_set(&cpuas->memory_dispatch, d);
tlb_flush(cpuas->cpu);
}
qemu_mutex_lock(&map_client_list_lock);
client->bh = bh;
QLIST_INSERT_HEAD(&map_client_list, client, link);
- if (!atomic_read(&bounce.in_use)) {
+ if (!qatomic_read(&bounce.in_use)) {
cpu_notify_map_clients_locked();
}
qemu_mutex_unlock(&map_client_list_lock);
mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
if (!memory_access_is_direct(mr, is_write)) {
- if (atomic_xchg(&bounce.in_use, true)) {
+ if (qatomic_xchg(&bounce.in_use, true)) {
*plen = 0;
return NULL;
}
qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
memory_region_unref(bounce.mr);
- atomic_mb_set(&bounce.in_use, false);
+ qatomic_mb_set(&bounce.in_use, false);
cpu_notify_map_clients();
}
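
The bounce-buffer lifecycle above is a try-lock built from two primitives: qatomic_xchg() claims the single buffer (a true return means someone else already holds it), and qatomic_mb_set() releases it with a full barrier so the teardown stores are visible before any notified waiter can observe in_use == false. A stripped-down sketch, with in_use and notify_waiters() as stand-ins for bounce.in_use and cpu_notify_map_clients():

static bool in_use;

void notify_waiters(void);  /* assumed hook */

static bool claim(void)
{
    /* xchg returns the previous value: true means we lost the race. */
    return !qatomic_xchg(&in_use, true);
}

static void release(void)
{
    /* Full barrier: teardown above is ordered before the wakeup below. */
    qatomic_mb_set(&in_use, false);
    notify_waiters();
}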
int old;
if (!state) {
- atomic_dec(&ram_block_discard_disabled);
+ qatomic_dec(&ram_block_discard_disabled);
return 0;
}
do {
- old = atomic_read(&ram_block_discard_disabled);
+ old = qatomic_read(&ram_block_discard_disabled);
if (old < 0) {
return -EBUSY;
}
- } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old + 1) != old);
+ } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+ old, old + 1) != old);
return 0;
}
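
The loop retries qatomic_cmpxchg() until the counter is swapped at exactly the value that was read, which makes the sign check and the increment one atomic step: a negative value means a "require" user is active, so the disable reference is refused with -EBUSY. The same idiom in isolation, with counter and inc_unless_negative() as invented names:

static int counter;

/* Atomically increment counter unless it is negative. */
static int inc_unless_negative(void)
{
    int old;

    do {
        old = qatomic_read(&counter);
        if (old < 0) {
            return -EBUSY;  /* the opposite mode holds the counter */
        }
        /* cmpxchg returns the value it found; a mismatch means a
         * concurrent update slipped in, so re-read and retry. */
    } while (qatomic_cmpxchg(&counter, old, old + 1) != old);
    return 0;
}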
int old;
if (!state) {
- atomic_inc(&ram_block_discard_disabled);
+ qatomic_inc(&ram_block_discard_disabled);
return 0;
}
do {
- old = atomic_read(&ram_block_discard_disabled);
+ old = qatomic_read(&ram_block_discard_disabled);
if (old > 0) {
return -EBUSY;
}
- } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old - 1) != old);
+ } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+ old, old - 1) != old);
return 0;
}
bool ram_block_discard_is_disabled(void)
{
- return atomic_read(&ram_block_discard_disabled) > 0;
+ return qatomic_read(&ram_block_discard_disabled) > 0;
}
bool ram_block_discard_is_required(void)
{
- return atomic_read(&ram_block_discard_disabled) < 0;
+ return qatomic_read(&ram_block_discard_disabled) < 0;
}
#endif
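
Taken together, the positive/negative encoding makes the two reference kinds mutually exclusive: a caller that must not see discards takes a "disable" reference, a caller that depends on discards takes a "require" reference, and each side fails fast with -EBUSY while the other is active. A hedged usage sketch; pin_guest_memory() is a hypothetical caller, not code from the patch:

/* E.g. a device that pins guest RAM and cannot tolerate discards. */
static int pin_guest_memory(void)
{
    if (ram_block_discard_disable(true)) {
        return -EBUSY;  /* some user (e.g. virtio-mem) requires discards */
    }
    /* ... pin pages ... */
    return 0;
}

static void unpin_guest_memory(void)
{
    /* ... unpin pages ... */
    ram_block_discard_disable(false);  /* drop our reference */
}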