+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
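+    /* Called via call_rcu(), so no RCU reader can still hold a reference. */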
+    if (!caches) {
+        return;
+    }
+
+    address_space_cache_destroy(&caches->desc);
+    address_space_cache_destroy(&caches->avail);
+    address_space_cache_destroy(&caches->used);
+    g_free(caches);
+}
+
+static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+{
+    VRingMemoryRegionCaches *caches;
+
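+    /*
+     * Clear the published pointer first so that new readers see no caches,
+     * then let RCU reclaim the old ones once current readers are done.
+     */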
+    caches = atomic_read(&vq->vring.caches);
+    atomic_rcu_set(&vq->vring.caches, NULL);
+    if (caches) {
+        call_rcu(caches, virtio_free_region_cache, rcu);
+    }
+}
+
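+/* Rebuild the region caches for virtqueue n's desc, used and avail rings. */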
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new = NULL;
+    hwaddr addr, size;
+    int event_size;
+    int64_t len;
+
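+    /*
+     * VIRTIO_RING_F_EVENT_IDX appends a 16-bit avail_event/used_event field
+     * after the used and avail rings, so account for it in the mapping size.
+     */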
+    event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
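+    /* A zero desc address means the ring is not set up; drop any stale cache. */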
+    addr = vq->vring.desc;
+    if (!addr) {
+        goto out_no_cache;
+    }
+    new = g_new0(VRingMemoryRegionCaches, 1);
+    size = virtio_queue_get_desc_size(vdev, n);
+    len = address_space_cache_init(&new->desc, vdev->dma_as,
+                                   addr, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map desc");
+        goto err_desc;
+    }
+
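+    /*
+     * The used ring is the only region the device writes, so only this cache
+     * is mapped writable; desc and avail stay read-only.
+     */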
+    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    len = address_space_cache_init(&new->used, vdev->dma_as,
+                                   vq->vring.used, size, true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map used");
+        goto err_used;
+    }
+
+    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    len = address_space_cache_init(&new->avail, vdev->dma_as,
+                                   vq->vring.avail, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map avail");
+        goto err_avail;
+    }
+
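+    /*
+     * Publish the new caches; concurrent RCU readers see either the old or
+     * the new mapping, and the old one is freed only after a grace period.
+     */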
+    atomic_rcu_set(&vq->vring.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+    return;
+
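+/*
+ * Unwind in reverse order of initialization; each error label also destroys
+ * the cache whose mapping came up short before freeing the struct.
+ */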
+err_avail:
+    address_space_cache_destroy(&new->avail);
+err_used:
+    address_space_cache_destroy(&new->used);
+err_desc:
+    address_space_cache_destroy(&new->desc);
+out_no_cache:
+    g_free(new);
+    virtio_virtqueue_reset_region_cache(vq);
+}
+