/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        if (_g->use_virgl_renderer) {                   \
            _virgl(__VA_ARGS__);                        \
        } else {                                        \
            _simple(__VA_ARGS__);                       \
        }                                               \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

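/*
 * Copy the pixel data of a 2D resource into the scanout's cursor.
 * The copy is skipped when the resource is missing or its dimensions
 * do not match the currently allocated cursor image.
 */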
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data); /* don't leak the cursor data on size mismatch */
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

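/*
 * Handle one request from the cursor queue.  MOVE_CURSOR only updates
 * the pointer position; UPDATE_CURSOR additionally (re)defines the
 * cursor image and hot spot from the referenced resource.
 */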
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

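/*
 * Send a response for a control command: propagate fence id and
 * context if the request was fenced, copy the response into the
 * element's in-sg, and push it back onto the control virtqueue.
 */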
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = g->req_state[i].width;
            dpy_info->pmodes[i].r.height = g->req_state[i].height;
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

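/*
 * virtio-gpu formats name the bytes as laid out in memory, whereas
 * pixman formats name the components within a host-endian word, so
 * the mapping differs between little- and big-endian hosts.
 */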
static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
#ifdef HOST_WORDS_BIGENDIAN
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_a8b8g8r8;
#else
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_a8b8g8r8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_r8g8b8a8;
#endif
    default:
        return 0;
    }
}

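/*
 * RESOURCE_CREATE_2D: back the guest resource with a host pixman
 * image.  Resource id 0, duplicate ids and formats without a pixman
 * equivalent are rejected.
 */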
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res); /* don't leak the resource on an unknown format */
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          NULL, 0);

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

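/*
 * TRANSFER_TO_HOST_2D: copy pixel data from the guest's backing pages
 * into the host image.  Full-width transfers from offset 0 take a
 * single iov_to_buf(); partial rectangles are copied line by line.
 * Note that the guest data is assumed to use the host image's stride.
 */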
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

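/*
 * RESOURCE_FLUSH: push a dirty rectangle out to every scanout the
 * resource is bound to, clipped against each scanout's own rectangle.
 */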
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

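/*
 * SET_SCANOUT: bind a resource to a scanout, or disable the scanout
 * when resource id 0 is given (refused for scanout 0).  The display
 * surface is recreated as a view into the resource's pixman image so
 * the console renders straight from the resource's pixels.
 */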
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        scanout = &g->scanout[ss.scanout_id];
        if (scanout->resource_id) {
            res = virtio_gpu_find_resource(g, scanout->resource_id);
            if (res) {
                res->scanout_bitmask &= ~(1 << ss.scanout_id);
            }
        }
        if (ss.scanout_id == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal scanout id specified %d",
                          __func__, ss.scanout_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        scanout->ds = NULL;
        scanout->width = 0;
        scanout->height = 0;
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

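/*
 * Map the memory entries of an attach-backing request into an iovec.
 * nr_entries is capped to bound the allocations; on a mapping failure
 * everything mapped so far is unwound.  The guest physical addresses
 * are optionally stored as well, so they can be written out again by
 * virtio_gpu_save().
 */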
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        hwaddr len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
        if (addr) {
            (*addr)[i] = ents[i].addr;
        }
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(*iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1,
                                  iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(res);
}

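/*
 * Dispatch a single control command in 2D mode.  Any command that has
 * not already sent its own response gets a no-data response carrying
 * either OK or the recorded error code.
 */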
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

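/*
 * Process queued control commands.  A command marked as waiting stops
 * the loop until processing is kicked again (for instance when the GL
 * block count drops to zero); commands that are not yet finished are
 * parked on the fence queue and counted as in flight.
 */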
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);
        if (cmd->waiting) {
            break;
        }
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

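/*
 * Control queue handler: initialise the virgl renderer on first use,
 * move all pending requests onto the command queue, then process it.
 */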
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

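/*
 * The UI raises the block count while it owns the GL context; command
 * processing resumes once the last blocker is gone.
 */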
static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
    .name = "virtio-gpu-with-virgl",
    .unmigratable = 1,
};

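/*
 * 2D-mode migration: each resource is streamed out by hand (id,
 * geometry, format, backing entries, pixel data), terminated by a
 * zero resource id, followed by the scanout state.  With virgl
 * enabled the device registers an unmigratable vmstate instead.
 */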
static void virtio_gpu_save(QEMUFile *f, void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    int i;

    virtio_save(vdev, f);

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(g);
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i, ret;

    if (version_id != VIRTIO_GPU_VM_VERSION) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f, version_id);
    if (ret) {
        return ret;
    }

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                cpu_physical_memory_map(res->addrs[i], &len, 1);
            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
        update_cursor(g, &scanout->cursor);
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

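/*
 * Realize: validate max_outputs, set up the virtio config space and
 * virtqueues (a larger control queue in 3D mode), and create one QEMU
 * console per scanout.
 */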
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->config_size = sizeof(struct virtio_gpu_config);
    g->virtio_config.num_scanouts = g->conf.max_outputs;
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                g->config_size);

    g->req_state[0].width = 1024;
    g->req_state[0].height = 768;

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
        g->virtio_config.num_capsets = 1;
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;
    g->qdev = qdev;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
    } else {
        register_savevm(qdev, "virtio-gpu", -1, VIRTIO_GPU_VM_VERSION,
                        virtio_gpu_save, virtio_gpu_load, g);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
#if 0
        g->req_state[i].x = 0;
        g->req_state[i].y = 0;
        if (i == 0) {
            g->req_state[0].width = 1024;
            g->req_state[0].height = 768;
        } else {
            g->req_state[i].width = 0;
            g->req_state[i].height = 0;
        }
#endif
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
    g->enabled_output_bitmask = 1;

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

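/*
 * Compile-time checks that the command structs match the sizes fixed
 * by the virtio-gpu wire format.
 */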
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);