/* Copyright 2012 Red Hat, Inc.
 * Copyright IBM, Corp. 2012
 *
 * Based on Linux 2.6.39 vhost code:
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include "trace.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/virtio/dataplane/vring.h"
#include "qemu/error-report.h"

/* vring_map can be coupled with vring_unmap or (if you still have the
 * value returned in *mr) memory_region_unref.
 */
static void *vring_map(MemoryRegion **mr, hwaddr phys, hwaddr len,
                       bool is_write)
{
    MemoryRegionSection section = memory_region_find(get_system_memory(),
                                                     phys, len);

    if (!section.mr || int128_get64(section.size) < len) {
        goto out;
    }
    if (is_write && section.readonly) {
        goto out;
    }
    if (!memory_region_is_ram(section.mr)) {
        goto out;
    }

    /* Ignore regions with dirty logging, we cannot mark them dirty */
    if (memory_region_is_logging(section.mr)) {
        goto out;
    }

    *mr = section.mr;
    return memory_region_get_ram_ptr(section.mr) + section.offset_within_region;

out:
    memory_region_unref(section.mr);
    *mr = NULL;
    return NULL;
}
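
/* An illustrative sketch of the mapping contract described above (not code
 * used elsewhere in this file): a caller that still holds the MemoryRegion
 * returned in *mr may drop the reference directly instead of calling
 * vring_unmap().
 *
 *     MemoryRegion *mr;
 *     void *p = vring_map(&mr, phys, len, false);
 *     if (p) {
 *         read guest memory through p, then:
 *         memory_region_unref(mr);    equivalent to vring_unmap(p, false)
 *     }
 */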

static void vring_unmap(void *buffer, bool is_write)
{
    ram_addr_t addr;
    MemoryRegion *mr;

    mr = qemu_ram_addr_from_host(buffer, &addr);
    memory_region_unref(mr);
}

/* Map the guest's vring to host memory */
bool vring_setup(Vring *vring, VirtIODevice *vdev, int n)
{
    hwaddr vring_addr = virtio_queue_get_ring_addr(vdev, n);
    hwaddr vring_size = virtio_queue_get_ring_size(vdev, n);
    void *vring_ptr;

    vring->broken = false;

    vring_ptr = vring_map(&vring->mr, vring_addr, vring_size, true);
    if (!vring_ptr) {
        error_report("Failed to map vring "
                     "addr %#" HWADDR_PRIx " size %" HWADDR_PRIu,
                     vring_addr, vring_size);
        vring->broken = true;
        return false;
    }

    /* The legacy ring layout places the used ring on the next 4096-byte
     * aligned boundary after the descriptor table and available ring. */
    vring_init(&vring->vr, virtio_queue_get_num(vdev, n), vring_ptr, 4096);

    vring->last_avail_idx = virtio_queue_get_last_avail_idx(vdev, n);
    vring->last_used_idx = vring->vr.used->idx;
    vring->signalled_used = 0;
    vring->signalled_used_valid = false;

    trace_vring_setup(virtio_queue_get_ring_addr(vdev, n),
                      vring->vr.desc, vring->vr.avail, vring->vr.used);
    return true;
}

void vring_teardown(Vring *vring, VirtIODevice *vdev, int n)
{
    virtio_queue_set_last_avail_idx(vdev, n, vring->last_avail_idx);
    virtio_queue_invalidate_signalled_used(vdev, n);

    memory_region_unref(vring->mr);
}

/* Disable guest->host notifies */
void vring_disable_notification(VirtIODevice *vdev, Vring *vring)
{
    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        vring->vr.used->flags |= VRING_USED_F_NO_NOTIFY;
    }
}

/* Enable guest->host notifies
 *
 * Return true if the vring is empty, false if there are more requests.
 */
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring)
{
    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(&vring->vr) = vring->vr.avail->idx;
    } else {
        vring->vr.used->flags &= ~VRING_USED_F_NO_NOTIFY;
    }
    smp_mb(); /* ensure update is seen before reading avail_idx */
    return !vring_more_avail(vring);
}
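
/* A rough sketch of how dataplane code is expected to use the two helpers
 * above (process_request() is an illustrative placeholder, not a function
 * defined in this file): requests are drained with notifications disabled,
 * then notifications are re-enabled and the ring re-checked to close the
 * race with the guest publishing a request in the meantime.
 *
 *     for (;;) {
 *         vring_disable_notification(vdev, vring);
 *         while (vring_pop(vdev, vring, &elem) >= 0) {
 *             process_request(elem);
 *         }
 *         if (vring_enable_notification(vdev, vring)) {
 *             break;
 *         }
 *     }
 */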

/* This is stolen from linux/drivers/vhost/vhost.c:vhost_notify() */
bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
{
    uint16_t old, new;
    bool v;

    /* Flush out used index updates. This is paired
     * with the barrier that the Guest executes when enabling
     * interrupts. */
    smp_mb();

    if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
        unlikely(vring->vr.avail->idx == vring->last_avail_idx)) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring->vr.avail->flags & VRING_AVAIL_F_NO_INTERRUPT);
    }

    old = vring->signalled_used;
    v = vring->signalled_used_valid;
    new = vring->signalled_used = vring->last_used_idx;
    vring->signalled_used_valid = true;

    if (unlikely(!v)) {
        return true;
    }

    return vring_need_event(vring_used_event(&vring->vr), new, old);
}
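
/* A small worked example of the event-idx test above (illustrative values
 * only): vring_need_event(event, new, old) is true when the guest's
 * used_event index falls inside the window of entries published since the
 * last notification, i.e. old <= event < new in modulo-2^16 arithmetic.
 *
 *     old = 10, new = 13, used_event = 11  ->  notify
 *     old = 10, new = 13, used_event = 15  ->  skip (guest asked to wait)
 */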

static int get_desc(Vring *vring, VirtQueueElement *elem,
                    struct vring_desc *desc)
{
    unsigned *num;
    struct iovec *iov;
    hwaddr *addr;
    MemoryRegion *mr;

    if (desc->flags & VRING_DESC_F_WRITE) {
        num = &elem->in_num;
        iov = &elem->in_sg[*num];
        addr = &elem->in_addr[*num];
    } else {
        num = &elem->out_num;
        iov = &elem->out_sg[*num];
        addr = &elem->out_addr[*num];

        /* If it's an output descriptor, they're all supposed
         * to come before any input descriptors. */
        if (unlikely(elem->in_num)) {
            error_report("Descriptor has out after in");
            return -EFAULT;
        }
    }

    /* Stop for now if there are not enough iovecs available. */
    if (*num >= VIRTQUEUE_MAX_SIZE) {
        return -ENOBUFS;
    }

    /* TODO handle non-contiguous memory across region boundaries */
    iov->iov_base = vring_map(&mr, desc->addr, desc->len,
                              desc->flags & VRING_DESC_F_WRITE);
    if (!iov->iov_base) {
        error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
                     (uint64_t)desc->addr, desc->len);
        return -EFAULT;
    }

    /* The MemoryRegion is looked up again and unref'ed later, leave the
     * ref in place. */
    iov->iov_len = desc->len;
    *addr = desc->addr;
    *num += 1;
    return 0;
}

/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(Vring *vring, VirtQueueElement *elem,
                        struct vring_desc *indirect)
{
    struct vring_desc desc;
    unsigned int i = 0, count, found = 0;
    int ret;

    if (unlikely(indirect->len % sizeof(desc))) {
        error_report("Invalid length in indirect descriptor: "
                     "len %#x not multiple of %#zx",
                     indirect->len, sizeof(desc));
        vring->broken = true;
        return -EFAULT;
    }

    count = indirect->len / sizeof(desc);
    /* Buffers are chained via a 16 bit next field, so
     * we can have at most 2^16 of these. */
    if (unlikely(count > USHRT_MAX + 1)) {
        error_report("Indirect buffer length too big: %d", indirect->len);
        vring->broken = true;
        return -EFAULT;
    }

    do {
        struct vring_desc *desc_ptr;
        MemoryRegion *mr;

        /* Translate indirect descriptor */
        desc_ptr = vring_map(&mr,
                             indirect->addr + found * sizeof(desc),
                             sizeof(desc), false);
        if (!desc_ptr) {
            error_report("Failed to map indirect descriptor "
                         "addr %#" PRIx64 " len %zu",
                         (uint64_t)indirect->addr + found * sizeof(desc),
                         sizeof(desc));
            vring->broken = true;
            return -EFAULT;
        }
        desc = *desc_ptr;
        memory_region_unref(mr);

        /* Ensure descriptor has been loaded before accessing fields */
        barrier(); /* read_barrier_depends(); */

        if (unlikely(++found > count)) {
            error_report("Loop detected: last one at %u "
                         "indirect size %u", i, count);
            vring->broken = true;
            return -EFAULT;
        }

        if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
            error_report("Nested indirect descriptor");
            vring->broken = true;
            return -EFAULT;
        }

        ret = get_desc(vring, elem, &desc);
        if (ret < 0) {
            vring->broken |= (ret == -EFAULT);
            return ret;
        }

        i = desc.next;
    } while (desc.flags & VRING_DESC_F_NEXT);

    return 0;
}

void vring_free_element(VirtQueueElement *elem)
{
    int i;

    /* This assumes that the iovecs, if changed, are never moved past
     * the end of the valid area. This is true if iovec manipulations
     * are done with iov_discard_front and iov_discard_back.
     */
    for (i = 0; i < elem->out_num; i++) {
        vring_unmap(elem->out_sg[i].iov_base, false);
    }

    for (i = 0; i < elem->in_num; i++) {
        vring_unmap(elem->in_sg[i].iov_base, true);
    }

    g_slice_free(VirtQueueElement, elem);
}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the head descriptor number on success.  A negative
 * code is returned on error (-EAGAIN if the ring is currently empty).
 *
 * Stolen from linux/drivers/vhost/vhost.c.
 */
int vring_pop(VirtIODevice *vdev, Vring *vring,
              VirtQueueElement **p_elem)
{
    struct vring_desc desc;
    unsigned int i, head, found = 0, num = vring->vr.num;
    uint16_t avail_idx, last_avail_idx;
    VirtQueueElement *elem = NULL;
    int ret;

    /* If there was a fatal error then refuse operation */
    if (vring->broken) {
        ret = -EFAULT;
        goto out;
    }

    /* Check it isn't doing very strange things with descriptor numbers. */
    last_avail_idx = vring->last_avail_idx;
    avail_idx = vring->vr.avail->idx;
    barrier(); /* load indices now and not again later */

    if (unlikely((uint16_t)(avail_idx - last_avail_idx) > num)) {
        error_report("Guest moved avail index from %u to %u",
                     last_avail_idx, avail_idx);
        ret = -EFAULT;
        goto out;
    }

    /* If there's nothing new since last we looked. */
    if (avail_idx == last_avail_idx) {
        ret = -EAGAIN;
        goto out;
    }

    /* Only get avail ring entries after they have been exposed by guest. */
    smp_rmb();

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring->vr.avail->ring[last_avail_idx % num];

    elem = g_slice_new(VirtQueueElement);
    elem->index = head;
    elem->in_num = elem->out_num = 0;

    /* If their number is silly, that's an error. */
    if (unlikely(head >= num)) {
        error_report("Guest says index %u > %u is available", head, num);
        ret = -EFAULT;
        goto out;
    }

    if (vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(&vring->vr) = vring->vr.avail->idx;
    }

    i = head;
    do {
        if (unlikely(i >= num)) {
            error_report("Desc index is %u > %u, head = %u", i, num, head);
            ret = -EFAULT;
            goto out;
        }
        if (unlikely(++found > num)) {
            error_report("Loop detected: last one at %u vq size %u head %u",
                         i, num, head);
            ret = -EFAULT;
            goto out;
        }

        desc = vring->vr.desc[i];

        /* Ensure descriptor is loaded before accessing fields */
        barrier();

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            ret = get_indirect(vring, elem, &desc);
            if (ret < 0) {
                goto out;
            }
            continue;
        }

        ret = get_desc(vring, elem, &desc);
        if (ret < 0) {
            goto out;
        }

        i = desc.next;
    } while (desc.flags & VRING_DESC_F_NEXT);

    /* On success, increment avail index. */
    vring->last_avail_idx++;
    *p_elem = elem;
    return head;

out:
    assert(ret < 0);
    if (ret == -EFAULT) {
        vring->broken = true;
    }
    if (elem) {
        vring_free_element(elem);
    }
    *p_elem = NULL;
    return ret;
}
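
/* Callers typically distinguish the outcomes of vring_pop() roughly like this
 * (a sketch only; handle_request() and handle_fatal_error() are illustrative
 * placeholders):
 *
 *     int head = vring_pop(vdev, vring, &elem);
 *     if (head >= 0) {
 *         handle_request(elem);
 *     } else if (head != -EAGAIN) {
 *         handle_fatal_error();
 *     }
 *
 * -EAGAIN means the ring is currently empty; -EFAULT means the vring has been
 * marked broken.
 */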

/* After we've used one of their buffers, we tell them about it.
 *
 * Stolen from linux/drivers/vhost/vhost.c.
 */
void vring_push(Vring *vring, VirtQueueElement *elem, int len)
{
    struct vring_used_elem *used;
    unsigned int head = elem->index;
    uint16_t new;

    vring_free_element(elem);

    /* Don't touch vring if a fatal error occurred */
    if (vring->broken) {
        return;
    }

    /* The virtqueue contains a ring of used buffers.  Get a pointer to the
     * next entry in that used ring. */
    used = &vring->vr.used->ring[vring->last_used_idx % vring->vr.num];
    used->id = head;
    used->len = len;

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    new = vring->vr.used->idx = ++vring->last_used_idx;
    if (unlikely((int16_t)(new - vring->signalled_used) < (uint16_t)1)) {
        vring->signalled_used_valid = false;
    }
}
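
/* A sketch of the completion path (illustrative only; notify_guest() stands
 * in for whatever mechanism the device uses to interrupt the guest, e.g. an
 * irqfd):
 *
 *     vring_push(vring, elem, bytes_written);
 *     if (vring_should_notify(vdev, vring)) {
 *         notify_guest(vdev);
 *     }
 */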