/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "trace.h"
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/migration.h"
#include "hw/virtio/virtio-access.h"
#include "sysemu/dma.h"

/*
 * The alignment to use between consumer and producer parts of vring.
 * x86 pagesize again. This is the default, used by transports like PCI
 * which don't provide a means for the guest to tell the host the alignment.
 */
#define VIRTIO_PCI_VRING_ALIGN         4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int num_default;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether the above signalled_used value is valid */
    bool signalled_used_valid;

    /* Nested host->guest notification disabled counter */
    unsigned int notification_disabled;

    uint16_t queue_index;

    unsigned int inuse;

    uint16_t vector;
    VirtIOHandleOutput handle_output;
    VirtIOHandleOutput handle_aio_output;
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
    QLIST_ENTRY(VirtQueue) node;
};

/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
    VRing *vring = &vdev->vq[n].vring;

    if (!vring->desc) {
        /* not yet setup -> nothing to do */
        return;
    }
    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
    vring->used = vring_align(vring->avail +
                              offsetof(VRingAvail, ring[vring->num]),
                              vring->align);
}

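/*
 * Worked example (illustrative, derived from the function above): for a
 * 256-entry queue with the default 4096-byte alignment, the layout is
 *
 *     avail = desc + 256 * sizeof(VRingDesc)          = desc + 4096
 *     used  = vring_align(avail + 4 + 2 * 256, 4096)  = desc + 8192
 *
 * i.e. the used ring starts at the next 4096-byte boundary after the
 * avail ring's flags/idx words and its 256 ring entries.
 */
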
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
                            hwaddr desc_pa, int i)
{
    address_space_read(vdev->dma_as, desc_pa + i * sizeof(VRingDesc),
                       MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc));
    virtio_tswap64s(vdev, &desc->addr);
    virtio_tswap32s(vdev, &desc->len);
    virtio_tswap16s(vdev, &desc->flags);
    virtio_tswap16s(vdev, &desc->next);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    vq->shadow_avail_idx = virtio_lduw_phys(vq->vdev, pa);
    return vq->shadow_avail_idx;
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline uint16_t vring_get_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                    int i)
{
    hwaddr pa;
    virtio_tswap32s(vq->vdev, &uelem->id);
    virtio_tswap32s(vq->vdev, &uelem->len);
    pa = vq->vring.used + offsetof(VRingUsed, ring[i]);
    address_space_write(vq->vdev->dma_as, pa, MEMTXATTRS_UNSPECIFIED,
                        (void *)uelem, sizeof(VRingUsedElem));
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return virtio_lduw_phys(vq->vdev, pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    virtio_stw_phys(vq->vdev, pa, val);
    vq->used_idx = val;
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    VirtIODevice *vdev = vq->vdev;
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    virtio_stw_phys(vdev, pa, virtio_lduw_phys(vdev, pa) & ~mask);
}

static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (vq->notification_disabled) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    virtio_stw_phys(vq->vdev, pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    if (enable) {
        assert(vq->notification_disabled > 0);
        vq->notification_disabled--;
    } else {
        vq->notification_disabled++;
    }

    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

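/*
 * Illustrative usage (a sketch, not code from this file): a handler that
 * polls the ring typically suppresses further kicks while draining and
 * re-enables them before exiting, then re-checks for buffers that raced in:
 *
 *     virtio_queue_set_notification(vq, 0);
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         ... process elem, virtqueue_push(), g_free() ...
 *     }
 *     virtio_queue_set_notification(vq, 1);
 *     if (!virtio_queue_empty(vq)) {
 *         ... go around again ...
 *     }
 */
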
int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
int virtio_queue_empty(VirtQueue *vq)
{
    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return 0;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}

static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
                               unsigned int len)
{
    AddressSpace *dma_as = vq->vdev->dma_as;
    unsigned int offset;
    int i;

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
                         elem->in_sg[i].iov_len,
                         DMA_DIRECTION_FROM_DEVICE, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
                         elem->out_sg[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         elem->out_sg[i].iov_len);
    }
}

/* virtqueue_detach_element:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Detach the element from the virtqueue.  This function is suitable for device
 * reset or other situations where a #VirtQueueElement is simply freed and will
 * not be pushed or discarded.
 */
void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
                              unsigned int len)
{
    vq->inuse--;
    virtqueue_unmap_sg(vq, elem, len);
}

/* virtqueue_unpop:
 * @vq: The #VirtQueue
 * @elem: The #VirtQueueElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to virtqueue_pop() will refetch the element.
 */
void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
                     unsigned int len)
{
    vq->last_avail_idx--;
    virtqueue_detach_element(vq, elem, len);
}

/* virtqueue_rewind:
 * @vq: The #VirtQueue
 * @num: Number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    VRingUsedElem uelem;

    trace_virtqueue_fill(vq, elem, len, idx);

    virtqueue_unmap_sg(vq, elem, len);

    if (unlikely(vq->vdev->broken)) {
        return;
    }

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = elem->index;
    uelem.len = len;
    vring_used_write(vq, &uelem, idx);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(vq->vdev->broken)) {
        vq->inuse -= count;
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

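/*
 * Note: virtqueue_push() is the single-element case of the fill/flush pair
 * above.  A device completing a batch can fill each element at its own
 * offset and publish the used index once (sketch):
 *
 *     for (i = 0; i < count; i++) {
 *         virtqueue_fill(vq, elems[i], lens[i], i);
 *     }
 *     virtqueue_flush(vq, count);
 */
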
static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
                     idx, vq->shadow_avail_idx);
        return -EINVAL;
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
                               unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        virtio_error(vq->vdev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}

enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};

static int virtqueue_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
                                    hwaddr desc_pa, unsigned int max,
                                    unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(desc->flags & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = desc->next;
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        virtio_error(vdev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    vring_desc_read(vdev, desc, desc_pa, *next);
    return VIRTQUEUE_READ_DESC_MORE;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
        VirtIODevice *vdev = vq->vdev;
        unsigned int max, num_bufs, indirect = 0;
        VRingDesc desc;
        hwaddr desc_pa;
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;

        if (!virtqueue_get_head(vq, idx++, &i)) {
            goto err;
        }

        desc_pa = vq->vring.desc;
        vring_desc_read(vdev, &desc, desc_pa, i);

        if (desc.flags & VRING_DESC_F_INDIRECT) {
            if (desc.len % sizeof(VRingDesc)) {
                virtio_error(vdev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            max = desc.len / sizeof(VRingDesc);
            desc_pa = desc.addr;
            num_bufs = i = 0;
            vring_desc_read(vdev, &desc, desc_pa, i);
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                virtio_error(vdev, "Looped descriptor");
                goto err;
            }

            if (desc.flags & VRING_DESC_F_WRITE) {
                in_total += desc.len;
            } else {
                out_total += desc.len;
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }

            rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }

    if (rc < 0) {
        goto err;
    }

done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
                               hwaddr *addr, struct iovec *iov,
                               unsigned int max_num_sg, bool is_write,
                               hwaddr pa, size_t sz)
{
    bool ok = false;
    unsigned num_sg = *p_num_sg;
    assert(num_sg <= max_num_sg);

    if (!sz) {
        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
        goto out;
    }

    while (sz) {
        hwaddr len = sz;

        if (num_sg == max_num_sg) {
            virtio_error(vdev, "virtio: too many write descriptors in "
                               "indirect table");
            goto out;
        }

        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
                                              is_write ?
                                              DMA_DIRECTION_FROM_DEVICE :
                                              DMA_DIRECTION_TO_DEVICE);
        if (!iov[num_sg].iov_base) {
            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
            goto out;
        }

        iov[num_sg].iov_len = len;
        addr[num_sg] = pa;

        sz -= len;
        pa += len;
        num_sg++;
    }
    ok = true;

out:
    *p_num_sg = num_sg;
    return ok;
}

/* Only used by error code paths before we have a VirtQueueElement (therefore
 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
 * yet.
 */
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
                                    struct iovec *iov)
{
    unsigned int i;

    for (i = 0; i < out_num + in_num; i++) {
        int is_write = i >= out_num;

        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
        iov++;
    }
}

static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
                                hwaddr *addr, unsigned int *num_sg,
                                unsigned int max_size, int is_write)
{
    unsigned int i;
    hwaddr len;

    /* Note: this function MUST validate input, some callers
     * are passing in num_sg values received over the network.
     */
    /* TODO: teach all callers that this can fail, and return failure instead
     * of asserting here.
     * When we do, we might be able to re-enable NDEBUG below.
     */
#ifdef NDEBUG
#error building with NDEBUG is not supported
#endif
    assert(*num_sg <= max_size);

    for (i = 0; i < *num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = dma_memory_map(vdev->dma_as,
                                        addr[i], &len, is_write ?
                                        DMA_DIRECTION_FROM_DEVICE :
                                        DMA_DIRECTION_TO_DEVICE);
        if (!sg[i].iov_base) {
            error_report("virtio: error trying to map MMIO memory");
            exit(1);
        }
        if (len != sg[i].iov_len) {
            error_report("virtio: unexpected memory split");
            exit(1);
        }
    }
}

void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num,
                        MIN(ARRAY_SIZE(elem->in_sg), ARRAY_SIZE(elem->in_addr)),
                        1);
    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num,
                        MIN(ARRAY_SIZE(elem->out_sg),
                            ARRAY_SIZE(elem->out_addr)),
                        0);
}

static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
    VirtQueueElement *elem;
    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VirtQueueElement));
    elem = g_malloc(out_sg_end);
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_addr = (void *)elem + in_addr_ofs;
    elem->out_addr = (void *)elem + out_addr_ofs;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}

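/*
 * Illustrative memory layout of the single allocation made above, for a
 * caller passing sz == sizeof(VirtQueueElement):
 *
 *     [ VirtQueueElement | in_addr[] | out_addr[] | in_sg[] | out_sg[] ]
 *
 * The addr and sg groups are placed at QEMU_ALIGN_UP'd offsets, so one
 * g_malloc() covers the element plus all four variable-sized arrays and
 * a single g_free() releases everything.
 */
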
void *virtqueue_pop(VirtQueue *vq, size_t sz)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;
    VirtIODevice *vdev = vq->vdev;
    VirtQueueElement *elem;
    unsigned out_num, in_num;
    hwaddr addr[VIRTQUEUE_MAX_SIZE];
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    VRingDesc desc;
    int rc;

    if (unlikely(vdev->broken)) {
        return NULL;
    }
    if (virtio_queue_empty(vq)) {
        return NULL;
    }
    /* Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads(). */
    smp_rmb();

    /* When we start there are none of either input nor output. */
    out_num = in_num = 0;

    max = vq->vring.num;

    if (vq->inuse >= vq->vring.num) {
        virtio_error(vdev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    i = head;
    vring_desc_read(vdev, &desc, desc_pa, i);
    if (desc.flags & VRING_DESC_F_INDIRECT) {
        if (desc.len % sizeof(VRingDesc)) {
            virtio_error(vdev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        max = desc.len / sizeof(VRingDesc);
        desc_pa = desc.addr;
        i = 0;
        vring_desc_read(vdev, &desc, desc_pa, i);
    }

    /* Collect all the descriptors */
    do {
        bool map_ok;

        if (desc.flags & VRING_DESC_F_WRITE) {
            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
                                        iov + out_num,
                                        VIRTQUEUE_MAX_SIZE - out_num, true,
                                        desc.addr, desc.len);
        } else {
            if (in_num) {
                virtio_error(vdev, "Incorrect order for descriptors");
                goto err_undo_map;
            }
            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
                                        VIRTQUEUE_MAX_SIZE, false,
                                        desc.addr, desc.len);
        }
        if (!map_ok) {
            goto err_undo_map;
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            virtio_error(vdev, "Looped descriptor");
            goto err_undo_map;
        }

        rc = virtqueue_read_next_desc(vdev, &desc, desc_pa, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        goto err_undo_map;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    elem->index = head;
    for (i = 0; i < out_num; i++) {
        elem->out_addr[i] = addr[i];
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_addr[i] = addr[out_num + i];
        elem->in_sg[i] = iov[out_num + i];
    }

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem;

err_undo_map:
    virtqueue_undo_map_desc(out_num, in_num, iov);
    return NULL;
}

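/*
 * Typical calling sequence (illustrative sketch, not from this file): a
 * device's handle_output callback drains the queue, fills the guest's
 * writable (in) buffers, and completes each element:
 *
 *     VirtQueueElement *elem;
 *
 *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
 *         size_t len = iov_from_buf(elem->in_sg, elem->in_num, 0,
 *                                   buf, buf_len);    // device -> guest
 *         virtqueue_push(vq, elem, len);
 *         g_free(elem);
 *     }
 *     virtio_notify(vdev, vq);
 *
 * where buf/buf_len stand in for device-specific data.
 */
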
/* Reading and writing a structure directly to QEMUFile is *awful*, but
 * it is what QEMU has always done by mistake.  We can change it sooner
 * or later by bumping the version number of the affected vm states.
 * In the meanwhile, since the in-memory layout of VirtQueueElement
 * has changed, we need to marshal to and from the layout that was
 * used before the change.
 */
typedef struct VirtQueueElementOld {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
} VirtQueueElementOld;

void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
{
    VirtQueueElement *elem;
    VirtQueueElementOld data;
    int i;

    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));

    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
    elem->index = data.index;

    for (i = 0; i < elem->in_num; i++) {
        elem->in_addr[i] = data.in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        elem->out_addr[i] = data.out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->in_sg[i].iov_base = 0;
        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Base is overwritten by virtqueue_map.  */
        elem->out_sg[i].iov_base = 0;
        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
    }

    virtqueue_map(vdev, elem);
    return elem;
}

void qemu_put_virtqueue_element(QEMUFile *f, VirtQueueElement *elem)
{
    VirtQueueElementOld data;
    int i;

    memset(&data, 0, sizeof(data));
    data.index = elem->index;
    data.in_num = elem->in_num;
    data.out_num = elem->out_num;

    for (i = 0; i < elem->in_num; i++) {
        data.in_addr[i] = elem->in_addr[i];
    }

    for (i = 0; i < elem->out_num; i++) {
        data.out_addr[i] = elem->out_addr[i];
    }

    for (i = 0; i < elem->in_num; i++) {
        /* Base is overwritten by virtqueue_map when loading.  Do not
         * save it, as it would leak the QEMU address space layout.  */
        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
    }

    for (i = 0; i < elem->out_num; i++) {
        /* Do not save iov_base as above.  */
        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
    }
    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
}

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (unlikely(vdev->broken)) {
        return;
    }

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

static int virtio_validate_features(VirtIODevice *vdev)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        return -EFAULT;
    }

    if (k->validate_features) {
        return k->validate_features(vdev);
    } else {
        return 0;
    }
}

int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
            val & VIRTIO_CONFIG_S_FEATURES_OK) {
            int ret = virtio_validate_features(vdev);

            if (ret) {
                return ret;
            }
        }
    }
    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
    return 0;
}

bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
    if (target_words_bigendian()) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

static enum virtio_device_endian virtio_current_cpu_endian(void)
{
    CPUClass *cc = CPU_GET_CLASS(current_cpu);

    if (cc->virtio_is_big_endian(current_cpu)) {
        return VIRTIO_DEVICE_ENDIAN_BIG;
    } else {
        return VIRTIO_DEVICE_ENDIAN_LITTLE;
    }
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);
    if (current_cpu) {
        /* Guest initiated reset */
        vdev->device_endian = virtio_current_cpu_endian();
    } else {
        /* System reset */
        vdev->device_endian = virtio_default_endian();
    }

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->broken = false;
    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].shadow_avail_idx = 0;
        vdev->vq[i].used_idx = 0;
        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification_disabled = 0;
        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
        vdev->vq[i].inuse = 0;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_le_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_le_p(vdev->config + addr);
    return val;
}

void virtio_config_modern_writeb(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writew(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_modern_writel(VirtIODevice *vdev,
                                 uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_le_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].vring.desc = addr;
    virtio_queue_update_rings(vdev, n);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
                            hwaddr avail, hwaddr used)
{
    vdev->vq[n].vring.desc = desc;
    vdev->vq[n].vring.avail = avail;
    vdev->vq[n].vring.used = used;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow guest to flip queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
}

VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
{
    return QLIST_FIRST(&vdev->vector_queues[vector]);
}

VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
{
    return QLIST_NEXT(vq, node);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_get_num_queues(VirtIODevice *vdev)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (!virtio_queue_get_num(vdev, i)) {
            break;
        }
    }

    return i;
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* virtio-1 compliant devices cannot change the alignment */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        error_report("tried to modify queue alignment for virtio-1 device");
        return;
    }
    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state)
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtio_queue_update_rings(vdev, n);
}

static void virtio_queue_notify_aio_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_aio_output) {
        VirtIODevice *vdev = vq->vdev;

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_aio_output(vdev, vq);
    }
}

static void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc && vq->handle_output) {
        VirtIODevice *vdev = vq->vdev;

        if (unlikely(vdev->broken)) {
            return;
        }

        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    VirtQueue *vq = &vdev->vq[n];

    if (n < VIRTIO_QUEUE_MAX) {
        if (vdev->vector_queues &&
            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
            QLIST_REMOVE(vq, node);
        }
        vdev->vq[n].vector = vector;
        if (vdev->vector_queues &&
            vector != VIRTIO_NO_VECTOR) {
            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
        }
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            VirtIOHandleOutput handle_output)
{
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.num_default = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;
    vdev->vq[i].handle_aio_output = NULL;

    return &vdev->vq[i];
}

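/*
 * Illustrative usage (sketch): a device's realize function creates its
 * queues once, with a default size and a per-queue handler, e.g.
 *
 *     n->rx_vq = virtio_add_queue(vdev, 256, handle_rx);
 *     n->tx_vq = virtio_add_queue(vdev, 256, handle_tx);
 *
 * (n, handle_rx and handle_tx are hypothetical names.)  The returned
 * VirtQueue pointers remain valid for the life of the device.
 */
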
void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
    vdev->vq[n].vring.num_default = 0;
}

static void virtio_set_isr(VirtIODevice *vdev, int value)
{
    uint8_t old = atomic_read(&vdev->isr);

    /* Do not write ISR if it does not change, so that its cacheline remains
     * shared in the common case where the guest does not read it.
     */
    if ((old & value) != value) {
        atomic_or(&vdev->isr, value);
    }
}

bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && virtio_queue_empty(vq)) {
        return true;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}

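/*
 * Worked example for the EVENT_IDX path (illustrative): if the guest set
 * used_event to 6 and the device moved used_idx from old == 5 to new == 8,
 * vring_need_event(6, 8, 5) evaluates
 *
 *     (uint16_t)(8 - 6 - 1) == 1  <  (uint16_t)(8 - 5) == 3
 *
 * so used_event lies in (old, new] and the guest is notified.  With
 * used_event == 9 the left-hand side would wrap to 0xfffe and the
 * notification would be suppressed.
 */
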
void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify_irqfd(vdev, vq);

    /*
     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
     * windows drivers included in virtio-win 1.8.0 (circa 2015) are
     * incorrectly polling this bit during crashdump and hibernation
     * in MSI mode, causing a hang if this bit is never updated.
     * Recent releases of Windows do not really shut down, but rather
     * log out and hibernate to make the next startup faster.  Hence,
     * this manifested as a more serious hang during shutdown with
     *
     * Next driver release from 2016 fixed this problem, so working around it
     * is not a must, but it's easy to do so let's do it here.
     *
     * Note: it's safe to update ISR from any thread as it was switched
     * to an atomic operation.
     */
    virtio_set_isr(vq->vdev, 0x1);
    event_notifier_set(&vq->guest_notifier);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!virtio_should_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    virtio_set_isr(vq->vdev, 0x1);
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_set_isr(vdev, 0x3);
    vdev->generation++;
    virtio_notify_vector(vdev, vdev->config_vector);
}

static bool virtio_device_endian_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return vdev->device_endian != virtio_default_endian();
    }
    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}

static bool virtio_64bit_features_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return (vdev->host_features >> 32) != 0;
}

static bool virtio_virtqueue_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
}

static bool virtio_ringsize_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    int i;

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
            return true;
        }
    }
    return false;
}

static bool virtio_extra_state_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    return k->has_extra_state &&
           k->has_extra_state(qbus->parent);
}

static bool virtio_broken_needed(void *opaque)
{
    VirtIODevice *vdev = opaque;

    return vdev->broken;
}

static const VMStateDescription vmstate_virtqueue = {
    .name = "virtqueue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vring.avail, struct VirtQueue),
        VMSTATE_UINT64(vring.used, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_virtqueues = {
    .name = "virtio/virtqueues",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_virtqueue_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_ringsize = {
    .name = "ringsize_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_ringsize = {
    .name = "virtio/ringsize",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_ringsize_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
        VMSTATE_END_OF_LIST()
    }
};

static int get_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (!k->load_extra_state) {
        return -1;
    } else {
        return k->load_extra_state(qbus->parent, f);
    }
}

static void put_extra_state(QEMUFile *f, void *pv, size_t size)
{
    VirtIODevice *vdev = pv;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    k->save_extra_state(qbus->parent, f);
}

static const VMStateInfo vmstate_info_extra_state = {
    .name = "virtqueue_extra_state",
    .get = get_extra_state,
    .put = put_extra_state,
};

static const VMStateDescription vmstate_virtio_extra_state = {
    .name = "virtio/extra_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_extra_state_needed,
    .fields = (VMStateField[]) {
        {
            .name         = "extra_state",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,
            .info         = &vmstate_info_extra_state,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_device_endian = {
    .name = "virtio/device_endian",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_device_endian_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(device_endian, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_64bit_features = {
    .name = "virtio/64bit_features",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_64bit_features_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(guest_features, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_broken = {
    .name = "virtio/broken",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_broken_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(broken, VirtIODevice),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio = {
    .name = "virtio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_device_endian,
        &vmstate_virtio_64bit_features,
        &vmstate_virtio_virtqueues,
        &vmstate_virtio_ringsize,
        &vmstate_virtio_broken,
        &vmstate_virtio_extra_state,
        NULL
    }
};

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &guest_features_lo);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        /* XXX virtio-1 devices */
        qemu_put_be64(f, vdev->vq[i].vring.desc);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }

    if (vdc->save != NULL) {
        vdc->save(vdev, f);
    }

    if (vdc->vmsd) {
        vmstate_save_state(f, vdc->vmsd, vdev, NULL);
    }

    /* Subsections */
    vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}

/* A wrapper for use as a VMState .put function */
static void virtio_device_put(QEMUFile *f, void *opaque, size_t size)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}

/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));

    return virtio_load(vdev, f, dc->vmsd->version_id);
}

const VMStateInfo virtio_vmstate_info = {
    .name = "virtio",
    .get = virtio_device_get,
    .put = virtio_device_put,
};

static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    bool bad = (val & ~(vdev->host_features)) != 0;

    val &= vdev->host_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
    /*
     * The driver must not attempt to set features after feature negotiation
     * has finished.
     */
    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
        return -EINVAL;
    }
    return virtio_set_features_nocheck(vdev, val);
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

    /*
     * We poison the endianness to ensure it does not get used before
     * subsections have been loaded.
     */
    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    /*
     * Temporarily set guest_features low bits - needed by
     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
     *
     * Note: devices should always test host features in future - don't create
     * new dependencies like this.
     */
    vdev->guest_features = features;

    config_len = qemu_get_be32(f);

    /*
     * There are cases where the incoming config can be bigger or smaller
     * than what we have; so load what we have space for, and skip
     * any excess that's in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_QUEUE_MAX) {
        error_report("Invalid number of virtqueues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].vring.desc = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification_disabled = 0;

        if (vdev->vq[i].vring.desc) {
            /* XXX virtio-1 devices */
            virtio_queue_update_rings(vdev, i);
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);

    if (vdc->load != NULL) {
        ret = vdc->load(vdev, f, version_id);
        if (ret) {
            return ret;
        }
    }

    if (vdc->vmsd) {
        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
        if (ret) {
            return ret;
        }
    }

    /* Subsections */
    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
    if (ret) {
        return ret;
    }

    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
        vdev->device_endian = virtio_default_endian();
    }

    if (virtio_64bit_features_needed(vdev)) {
        /*
         * Subsection load filled vdev->guest_features.  Run them
         * through virtio_set_features to sanity-check them against
         * host_features.
         */
        uint64_t features64 = vdev->guest_features;
        if (virtio_set_features_nocheck(vdev, features64) < 0) {
            error_report("Features 0x%" PRIx64 " unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features64, vdev->host_features);
            return -1;
        }
    } else {
        if (virtio_set_features_nocheck(vdev, features) < 0) {
            error_report("Features 0x%x unsupported. "
                         "Allowed features: 0x%" PRIx64,
                         features, vdev->host_features);
            return -1;
        }
    }

    for (i = 0; i < num; i++) {
        if (vdev->vq[i].vring.desc) {
            uint16_t nheads;
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);

            /*
             * Some devices migrate VirtQueueElements that have been popped
             * from the avail ring but not yet returned to the used ring.
             * Since max ring size < UINT16_MAX it's safe to use modulo
             * UINT16_MAX + 1 subtraction.
             */
            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
                                vdev->vq[i].used_idx);
bccdef6b
SH
1886 if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
1887 error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
1888 "used_idx 0x%x",
1889 i, vdev->vq[i].vring.num,
1890 vdev->vq[i].last_avail_idx,
1891 vdev->vq[i].used_idx);
1892 return -1;
1893 }
616a6552
GK
1894 }
1895 }
1896
1897 return 0;
967f97fa
AL
1898}
1899
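/*
 * Worked example for the in-flight accounting in virtio_load() above
 * (illustrative sketch, not live code): the ring indices are free-running
 * uint16_t values, so the subtraction remains correct across wraparound.
 */
#if 0
    uint16_t last_avail_idx = 0x0002;   /* wrapped past 0xffff */
    uint16_t used_idx = 0xfffe;
    uint16_t inuse = (uint16_t)(last_avail_idx - used_idx);   /* == 4 */
    /* 4 would then be checked against vring.num, as done above */
#endif
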
void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
    g_free(vdev->vector_queues);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_instance_init_common(Object *proxy_obj, void *data,
                                 size_t vdev_size, const char *vdev_name)
{
    DeviceState *vdev = data;

    object_initialize(vdev, vdev_size, vdev_name);
    object_property_add_child(proxy_obj, "virtio-backend", OBJECT(vdev), NULL);
    object_unref(OBJECT(vdev));
    qdev_alias_all_properties(vdev, proxy_obj);
}
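
/*
 * Sketch of how a transport proxy typically uses the helper above; the
 * virtio-foo names are hypothetical and stand in for a concrete device.
 */
#if 0
static void virtio_foo_pci_instance_init(Object *obj)
{
    VirtIOFooPCI *dev = VIRTIO_FOO_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_FOO);
}
#endif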

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;
    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;

    if (nvectors) {
        vdev->vector_queues =
            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
    }

    vdev->device_id = device_id;
    vdev->status = 0;
    atomic_set(&vdev->isr, 0);
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    vdev->broken = false;
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
    vdev->device_endian = virtio_default_endian();
    vdev->use_guest_notifier_mask = true;
}
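
/*
 * Sketch of the expected call sequence from a device model's realize hook;
 * the virtio-foo names, queue size and config struct are assumptions made
 * for illustration.
 */
#if 0
static void virtio_foo_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOFoo *foo = VIRTIO_FOO(dev);

    virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
                sizeof(struct virtio_foo_config));
    foo->vq = virtio_add_queue(vdev, 128, virtio_foo_handle_output);
}
#endif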

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
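
/*
 * Example layout arithmetic for the size helpers above: with
 * vring.num == 256 on a legacy device they return
 *   desc:  sizeof(VRingDesc) * 256         = 16 * 256 = 4096 bytes
 *   avail: 4 + sizeof(uint16_t) * 256      =  516 bytes
 *   used:  4 + sizeof(VRingUsedElem) * 256 = 2052 bytes
 * with the avail and used rings separated in guest memory according to
 * the transport's vring alignment (see virtio_queue_update_rings()).
 */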

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
    vdev->vq[n].shadow_avail_idx = idx;
}

void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
{
    vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_notify_vector(vq->vdev, vq->vector);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier, false,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, false, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it,
         * in case poll callback didn't have time to run. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_aio_vq(vq);
    }
}

static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    virtio_queue_set_notification(vq, 0);
}

static bool virtio_queue_host_notifier_aio_poll(void *opaque)
{
    EventNotifier *n = opaque;
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    if (virtio_queue_empty(vq)) {
        return false;
    }

    virtio_queue_notify_aio_vq(vq);
    return true;
}

static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);

    /* Caller polls once more after this to catch requests that race with us */
    virtio_queue_set_notification(vq, 1);
}

void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
                                                VirtIOHandleOutput handle_output)
{
    if (handle_output) {
        vq->handle_aio_output = handle_output;
        aio_set_event_notifier(ctx, &vq->host_notifier, true,
                               virtio_queue_host_notifier_aio_read,
                               virtio_queue_host_notifier_aio_poll);
        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
                                    virtio_queue_host_notifier_aio_poll_begin,
                                    virtio_queue_host_notifier_aio_poll_end);
    } else {
        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
        /* Test and clear notifier after disabling event,
         * in case poll callback didn't have time to run. */
        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
        vq->handle_aio_output = NULL;
    }
}
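
/*
 * Sketch of how a dataplane implementation hands a queue over to an
 * IOThread's AioContext and later takes it back; virtio_foo_data_plane_handler
 * is a hypothetical VirtIOHandleOutput callback.
 */
#if 0
    aio_context_acquire(ctx);
    virtio_queue_aio_set_host_notifier_handler(vq, ctx,
                                               virtio_foo_data_plane_handler);
    aio_context_release(ctx);

    /* ... and on dataplane stop: */
    aio_context_acquire(ctx);
    virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);
    aio_context_release(ctx);
#endif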

void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    g_free(vdev->bus_name);
    vdev->bus_name = g_strdup(bus_name);
}

void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);

    vdev->broken = true;

    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        virtio_set_status(vdev, vdev->status | VIRTIO_CONFIG_S_NEEDS_RESET);
        virtio_notify_config(vdev);
    }
}
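
/*
 * Typical use of virtio_error() from a device model when the guest supplies
 * a malformed request (sketch; the virtio-foo message and the
 * virtqueue_detach_element() cleanup are illustrative assumptions).
 */
#if 0
    if (elem->out_num == 0) {
        virtio_error(vdev, "virtio-foo: request without out descriptors");
        virtqueue_detach_element(vq, elem, 0);
        return;
    }
#endif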

static void virtio_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    /* Devices should either use vmsd or the load/save methods */
    assert(!vdc->vmsd || !vdc->load);

    if (vdc->realize != NULL) {
        vdc->realize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    virtio_bus_device_plugged(vdev, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
}

static void virtio_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
    Error *err = NULL;

    virtio_bus_device_unplugged(vdev);

    if (vdc->unrealize != NULL) {
        vdc->unrealize(dev, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            return;
        }
    }

    g_free(vdev->bus_name);
    vdev->bus_name = NULL;
}

static Property virtio_properties[] = {
    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
    DEFINE_PROP_END_OF_LIST(),
};
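
/*
 * Sketch of a concrete device's class_init wiring against this base class;
 * all virtio_foo_* symbols are hypothetical.
 */
#if 0
static void virtio_foo_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_foo_properties;
    vdc->realize = virtio_foo_device_realize;
    vdc->unrealize = virtio_foo_device_unrealize;
    vdc->get_features = virtio_foo_get_features;
}
#endif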

static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r, err;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        r = virtio_bus_set_host_notifier(qbus, n, true);
        if (r < 0) {
            err = r;
            goto assign_error;
        }
        event_notifier_set_handler(&vq->host_notifier, true,
                                   virtio_queue_host_notifier_read);
    }

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        /* Kick right away to begin processing requests already in vring */
        VirtQueue *vq = &vdev->vq[n];
        if (!vq->vring.num) {
            continue;
        }
        event_notifier_set(&vq->host_notifier);
    }
    return 0;

assign_error:
    while (--n >= 0) {
        VirtQueue *vq = &vdev->vq[n];
        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }

        event_notifier_set_handler(&vq->host_notifier, true, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
    return err;
}

int virtio_device_start_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_start_ioeventfd(vbus);
}

static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
{
    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
    int n, r;

    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
        VirtQueue *vq = &vdev->vq[n];

        if (!virtio_queue_get_num(vdev, n)) {
            continue;
        }
        event_notifier_set_handler(&vq->host_notifier, true, NULL);
        r = virtio_bus_set_host_notifier(qbus, n, false);
        assert(r >= 0);
    }
}

void virtio_device_stop_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_stop_ioeventfd(vbus);
}

int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_grab_ioeventfd(vbus);
}

void virtio_device_release_ioeventfd(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    virtio_bus_release_ioeventfd(vbus);
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default value here. */
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_device_realize;
    dc->unrealize = virtio_device_unrealize;
    dc->bus_type = TYPE_VIRTIO_BUS;
    dc->props = virtio_properties;
    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;

    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}

bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);

    return virtio_bus_ioeventfd_enabled(vbus);
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};
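
/*
 * A concrete device registers itself as a subtype of TYPE_VIRTIO_DEVICE;
 * sketch with hypothetical virtio-foo names:
 */
#if 0
static const TypeInfo virtio_foo_info = {
    .name = TYPE_VIRTIO_FOO,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOFoo),
    .class_init = virtio_foo_class_init,
};
#endif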

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)