/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

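/*
 * Byte-swap a whole guest command structure in place: the common control
 * header first, then the remaining 32-bit words.  This is a no-op on
 * little-endian hosts, where guest and host byte order already match.
 */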
static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN
    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }
#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

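/*
 * VIRGL() dispatches a call either to the virglrenderer-backed 3D
 * implementation or to the simple 2D implementation, depending on whether
 * the guest negotiated VIRTIO_GPU_F_VIRGL.  Without CONFIG_VIRGL only the
 * simple path exists.
 */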
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

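/*
 * The update_cursor_data_* helpers copy a resource's pixel data into the
 * scanout's QEMU cursor; which one runs depends on whether virgl is in use.
 */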
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

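/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR / MOVE_CURSOR.  An update refreshes
 * the cursor image and hotspot before repositioning it; a move only
 * changes the position.
 */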
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

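/*
 * Complete a control request: copy the response into the request's
 * device-writable buffers, tag it with the fence from the request if one
 * was asked for, then push the element back to the guest and notify.
 */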
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

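/*
 * Map a virtio-gpu 2D resource format onto the equivalent pixman format.
 * Returns 0 for formats the host cannot handle.
 */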
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

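/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for the
 * resource, as long as it fits within the configured max_hostmem budget.
 */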
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

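/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy pixel data from the guest's
 * backing pages into the host pixman image.  A transfer of full rows
 * starting at the top-left with offset 0 is done in a single copy;
 * anything else is copied line by line.
 */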
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

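/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for every scanout showing the resource,
 * intersect the flushed rectangle with the scanout and update only that
 * part of the console.
 */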
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

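/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: point a scanout at a rectangle within a
 * resource.  A new DisplaySurface wrapping the resource's pixels is only
 * created when the backing pointer or geometry actually changed.
 */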
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

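/*
 * Map the guest memory entries that follow an attach_backing command into
 * host iovecs.  On failure everything mapped so far is unwound and -1 is
 * returned.
 */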
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

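/*
 * Decode and dispatch one control command on the 2D path.  Commands that
 * did not queue their own response get a plain OK / error reply here.
 */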
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

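/*
 * Drain the queued control commands.  Commands that are not yet finished
 * (typically fenced 3D commands) are moved to the fence queue; completed
 * ones are freed here.
 */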
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

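/*
 * Device state is streamed as a list of resources (metadata, backing iov
 * entries, then raw pixel data), terminated by a zero resource id, followed
 * by the scanout vmstate.
 */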
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

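/*
 * Realize: sanity-check the configuration, decide whether virgl can be
 * used, register the virtio queues and bottom halves, and create one
 * QEMU console per configured scanout.
 */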
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
        error_setg(errp, "virtio-gpu does not support vIOMMU yet");
        return;
    }

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */

static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);