/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}
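
/*
 * Worked example (a sketch, not part of the original file): for a queue of
 * num = 256 descriptors placed at desc = 0x10000 with the default 4096-byte
 * alignment, the code above computes
 *
 *     avail = 0x10000 + 256 * sizeof(VRingDesc)            = 0x11000
 *     used  = vring_align(0x11000 + 2 + 2 + 2 * 256, 4096) = 0x12000
 *
 * i.e. the avail ring (flags, idx, and 256 two-byte entries) ends at
 * 0x11204, and the used ring starts at the next alignment boundary.
 */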

static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
                        (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
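
/*
 * Device-side usage sketch (process_queue() is a hypothetical helper, not
 * part of this file): handlers often suppress guest notifications while
 * draining the ring, then re-enable them and re-check for buffers that
 * raced with the final check.
 *
 *     do {
 *         virtio_queue_set_notification(vq, 0);
 *         process_queue(vq);
 *         virtio_queue_set_notification(vq, 1);
 *     } while (!virtio_queue_empty(vq));
 */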

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue. This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
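
/*
 * Usage sketch (s->n_unprocessed is a hypothetical per-device counter):
 * a device that stops processing mid-stream can return the still-pending
 * elements to the ring so the guest sees them again later.
 *
 *     if (!virtqueue_rewind(vq, s->n_unprocessed)) {
 *         error_report("rewind failed: too many elements");
 *     }
 */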

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}
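
/*
 * virtqueue_push() covers the common one-element case. A device completing
 * several elements can instead fill each slot and publish them with a
 * single used index update (sketch; elems[], lens[] and done are
 * illustrative names, not from this file):
 *
 *     for (i = 0; i < done; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, done);
 */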

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}
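
/*
 * Example (a sketch): a device that needs at least 512 device-writable
 * bytes, and no device-readable ones, can probe before committing to a
 * pop:
 *
 *     if (!virtqueue_avail_bytes(vq, 512, 0)) {
 *         return;
 *     }
 */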

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int *num_sg,
                                int is_write)
{
    unsigned int i;
    hwaddr len;

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
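
/*
 * The element is a single allocation with its four variable-length arrays
 * packed behind the caller's struct. For example (illustrative only), with
 * sz = sizeof(VirtQueueElement), out_num = 1 and in_num = 2 the layout is
 *
 *     [VirtQueueElement][in_addr x 2][out_addr x 1][in_sg x 2][out_sg x 1]
 *
 * with each array start rounded up to the alignment of its element type,
 * so the whole thing can be freed with a single g_free().
 */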

void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}
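
/*
 * Typical handler loop (a sketch; process_request() and the byte count it
 * returns are hypothetical): pop elements until the ring is empty, push
 * each one back with the number of bytes written, then notify once.
 *
 *     VirtQueueElement *elem;
 *
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         unsigned int len = process_request(elem);
 *         virtqueue_push(vq, elem, len);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 */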

/* virtqueue_drop_all:
 * @vq: The #VirtQueue
 * Drops all queued buffers and indicates them to the guest
 * as if they are done. Useful when buffers can not be
 * processed but must be returned to the guest.
 */
unsigned int virtqueue_drop_all(VirtQueue *vq)
{
    unsigned int dropped = 0;
    VirtQueueElement elem = {};
    VirtIODevice *vdev = vq->vdev;
    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

    if (unlikely(vdev->broken)) {
        return 0;
    }

    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
        /* works similar to virtqueue_pop but does not map buffers
         * and does not allocate any memory */
        smp_rmb();
        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
            break;
        }
        vq->inuse++;
        vq->last_avail_idx++;
        if (fEventIdx) {
            vring_set_avail_event(vq, vq->last_avail_idx);
        }
        /* immediately push the element, nothing to unmap
         * as both in_num and out_num are set to 0 */
        virtqueue_push(vq, &elem, 0);
        dropped++;
    }

    return dropped;
}

/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num_default;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}
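
/*
 * Typical use at realize time (a sketch; the queue size and
 * foo_handle_request() are illustrative, not from this file):
 *
 *     s->req_vq = virtio_add_queue(vdev, 128, foo_handle_request);
 *
 * The handler is invoked when the guest notifies the queue, and usually
 * drains it with virtqueue_pop() as sketched above.
 */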

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}

static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
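
/*
 * vring_need_event(), from the virtio ring headers, implements the
 * EVENT_IDX rule: notify only if used_event lies in the window of entries
 * published since the last signal, i.e.
 *
 *     (uint16_t)(new - event - 1) < (uint16_t)(new - old)
 *
 * Worked example: old = 10, new = 12. With used_event = 10 the left side
 * is 1 and the right side is 2, so we notify; with used_event = 12 the
 * left side wraps to 65535 and the interrupt is skipped.
 */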

void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown.
     *
     * The next driver release, from 2016, fixed this problem, so working
     * around it is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
        k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static int put_extra_state(QEMUFile *f, void *pv, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
    return 0;
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name = "extra_state",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,
            .info = &vmstate_info_extra_state,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        vmstate_save_state(f, vdc->vmsd, vdev, NULL);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field, QJSON *vmdesc)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);

    return 0;
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
                             VMStateField *field)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}
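
/*
 * Transport-side sketch: when the driver writes its feature selection, the
 * transport forwards it here and should reject the write if negotiation has
 * already completed or unsupported bits were requested ('guest_val' is an
 * illustrative name, not from this file):
 *
 *     if (virtio_set_features(vdev, guest_val) < 0) {
 *         ... FEATURES_OK already set, or bits outside host_features ...
 *     }
 *
 * Unsupported bits are masked off before being stored in guest_features.
 */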
1763
1b5fc0de 1764int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
967f97fa 1765{
cc459952 1766 int i, ret;
a890a2f9 1767 int32_t config_len;
cc459952 1768 uint32_t num;
6d74ca5a 1769 uint32_t features;
1c819449
FK
1770 BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1771 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1b5fc0de 1772 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
967f97fa 1773
616a6552
GK
1774 /*
1775 * We poison the endianness to ensure it does not get used before
1776 * subsections have been loaded.
1777 */
1778 vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
1779
1c819449
FK
1780 if (k->load_config) {
1781 ret = k->load_config(qbus->parent, f);
ff24bd58
MT
1782 if (ret)
1783 return ret;
1784 }
967f97fa 1785
967f97fa
AL
1786 qemu_get_8s(f, &vdev->status);
1787 qemu_get_8s(f, &vdev->isr);
1788 qemu_get_be16s(f, &vdev->queue_sel);
87b3bd1c 1789 if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
4b53c2c7
MR
1790 return -1;
1791 }
6d74ca5a 1792 qemu_get_be32s(f, &features);
ad0c9332 1793
62cee1a2
MT
1794 /*
1795 * Temporarily set guest_features low bits - needed by
1796 * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
1797 * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
1798 *
1799 * Note: devices should always test host features in future - don't create
1800 * new dependencies like this.
1801 */
1802 vdev->guest_features = features;
1803
a890a2f9 1804 config_len = qemu_get_be32(f);
2f5732e9
DDAG
1805
1806 /*
1807 * There are cases where the incoming config can be bigger or smaller
1808 * than what we have; so load what we have space for, and skip
1809 * any excess that's in the stream.
1810 */
1811 qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
1812
1813 while (config_len > vdev->config_len) {
1814 qemu_get_byte(f);
1815 config_len--;
a890a2f9 1816 }
967f97fa
AL
1817
    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features_nocheck to sanity-check them
         * against host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
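            /*
             * For example (illustrative values): last_avail_idx == 3 after
             * wrapping past zero and used_idx == 0xfffe gives
             * (uint16_t)(3 - 0xfffe) == 5 elements still in flight.
             */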
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                           vdev->vq[i].used_idx);
            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
                             "used_idx 0x%x",
                             i, vdev->vq[i].vring.num,
                             vdev->vq[i].last_avail_idx,
                             vdev->vq[i].used_idx);
                return -1;
            }
        }
    }

    return 0;
}

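/* Free the resources allocated by virtio_init(). */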
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

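/*
 * VM run state change handler.  Note the ordering: the device status
 * callback runs before the transport's vmstate_change hook when the VM
 * starts, and after it when the VM stops.
 */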
static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

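/*
 * Helper for proxy objects (e.g. transport devices) that embed a virtio
 * backend: initialize it, attach it as the "virtio-backend" QOM child
 * and alias its properties onto the proxy.
 */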
void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}

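/*
 * Common initialization for all virtio devices: allocate the virtqueue
 * array and config space, register the VM state change handler, and
 * reset queue and vector bookkeeping to defaults.
 *
 * A minimal (hypothetical) device realize hook might look like:
 *
 *     static void virtio_foo_device_realize(DeviceState *dev, Error **errp)
 *     {
 *         VirtIODevice *vdev = VIRTIO_DEVICE(dev);
 *
 *         virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
 *                     sizeof(struct virtio_foo_config));
 *         virtio_add_queue(vdev, 64, handle_foo_output);
 *     }
 *
 * where "virtio-foo", VIRTIO_ID_FOO, struct virtio_foo_config and
 * handle_foo_output are illustrative names, not part of this file.
 */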
void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}

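/* Accessors for the guest-physical address and size of each vring part. */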
hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

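/*
 * Guest notifier path: when irqfd cannot be used, this fd handler reads
 * the notifier and raises the interrupt from the event loop instead.
 */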
static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_notify_vector(vq->vdev, vq->vector);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /*
         * Test and clear notifier before closing it,
         * in case poll callback didn't have time to run.
         */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

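/*
 * Host notifier handling for queues processed in an AioContext (e.g.
 * dataplane).  The poll_begin/poll_end callbacks suppress guest->host
 * notifications while the event loop is busy-polling the vring.
 */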
static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (virtio_queue_empty(vq)) {
        return false;
    }

    virtio_queue_notify_aio_vq(vq);

    /* In case the handler function re-enabled notifications */
    virtio_queue_set_notification(vq, 0);
    return true;
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

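/*
 * Attach (handle_output != NULL) or detach the host notifier of @vq to
 * @ctx.  Detaching drains any pending notification so no request is
 * left unprocessed.
 */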
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /*
         * Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run.
         */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}

void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

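/*
 * Report an internal error and mark the device broken.  For virtio-1
 * devices this also sets NEEDS_RESET and raises a config interrupt;
 * legacy devices have no equivalent way to signal the guest.
 */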
void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}

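/* QOM realize: run the device class realize hook, then plug into the bus. */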
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

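/* QOM unrealize: unplug from the bus first, then run the device hook. */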
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};

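/*
 * Default start_ioeventfd implementation: assign a host notifier to
 * every in-use queue, then kick each one so that requests already in
 * the vring are processed.  On failure, unwind the notifiers assigned
 * so far.
 */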
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r, err;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    return err;
}

int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
}

void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_stop_ioeventfd(vbus);
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)