/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <[email protected]>
 *     Gerd Hoffmann <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);

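/*
 * Commands can be handled either by the virgl (3D) renderer or by the
 * simple 2D implementation in this file.  The VIRGL() macro dispatches a
 * call to one or the other, depending on whether the guest negotiated the
 * virgl feature; without CONFIG_VIRGL it collapses to the 2D path.
 */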
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->use_virgl_renderer) {                       \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

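/*
 * Send a response back to the guest.  If the request carried the FENCE
 * flag, the fence and context ids are copied into the response header so
 * the guest can match the completion against its fence.
 */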
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

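/*
 * The virtio-gpu 2D formats name their components in memory byte order,
 * so they are mapped to the fixed-byte-order PIXMAN_BE_* aliases (see
 * ui/qemu-pixman.h) rather than to host-endian packed pixman codes.
 * Unsupported formats yield 0, which is not a valid pixman code.
 */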
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

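/*
 * Create a host-side pixman image for a guest resource.  The allocation
 * is only attempted while the per-device host memory budget (the
 * "max_hostmem" property) is not exceeded; otherwise the guest gets
 * VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY.
 */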
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = PIXMAN_FORMAT_BPP(pformat) * c2d.width * c2d.height;
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

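/*
 * Copy data from the guest's backing pages into the host pixman image.
 * A transfer that covers the full image width with no offset can be
 * satisfied by a single iov_to_buf() of the whole image; partial
 * rectangles are copied line by line at the appropriate stride offsets.
 */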
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

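/*
 * Flush a rectangle of a resource to every scanout that shows it: the
 * flush region is intersected with each scanout's rectangle, translated
 * into scanout-local coordinates, and the extents of the result are
 * passed to dpy_gfx_update() for that console.
 */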
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

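/*
 * The display surface created below is a pixman sub-image that points
 * straight into the resource's pixel data.  An extra reference is taken
 * on res->image and dropped again by virtio_unref_resource() when the
 * sub-image is destroyed, so the backing store cannot go away while the
 * console still uses it.
 */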
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

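/*
 * Translate the guest's scatter-gather list (the virtio_gpu_mem_entry
 * array following the request header) into host iovecs by mapping each
 * entry's guest-physical range.  The entry count is capped at 16384 to
 * bound the allocation; on any mapping failure everything mapped so far
 * is undone.
 */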
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

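/*
 * 2D command dispatcher.  Handlers flag errors in cmd->error; any command
 * that did not already send its own response (cmd->finished) gets a
 * generic OK_NODATA or error response on the way out.
 */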
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

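/*
 * Drain the queued control commands.  Processing stops early if a command
 * is blocked (cmd->waiting); commands that have not finished by the time
 * their handler returns (e.g. fenced virgl commands) are parked on fenceq
 * until the renderer signals the fence.
 */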
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

854 | { | |
855 | VirtIOGPU *g = opaque; | |
856 | virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq); | |
857 | } | |
858 | ||
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
#ifdef CONFIG_VIRGL
    .gl_block = virtio_gpu_gl_block,
#endif
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

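/*
 * Device state is streamed as a list of resource records (id, geometry,
 * format, backing entries, pixel data), terminated by a zero resource id,
 * followed by the scanout configuration via vmstate.
 */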
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);

    return 0;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height;

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    cpu_physical_memory_unmap(res->iov[i].iov_base,
                                              len, 0, 0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

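/*
 * Realize: virgl is only offered when built in, on a little-endian host,
 * and when the display supports OpenGL; it is also (for now) a migration
 * blocker.  The control queue is larger in 3D mode (256 vs 64 entries).
 */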
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the sense
 * that no save/load callbacks are provided to the core.  Instead the
 * device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem,
                     256 * 1024 * 1024),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);