/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <[email protected]>
 *     Gerd Hoffmann <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"

#ifdef CONFIG_VIRGL

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

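/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: map the guest's 2D resource onto
 * a host texture.  The literals target = 2 and bind = (1 << 1) appear
 * to match gallium's PIPE_TEXTURE_2D and PIPE_BIND_RENDER_TARGET, and
 * Y_0_TOP marks the top-left origin that 2D guests expect.
 */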
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

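/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: the guest supplies the full set
 * of creation parameters (target, bind, mip levels, samples, ...), so
 * they are forwarded to virglrenderer unmodified.
 */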
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

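/*
 * VIRTIO_GPU_CMD_RESOURCE_UNREF: detach and free any guest backing
 * store still mapped into the resource before dropping the renderer's
 * reference, so the guest page mappings do not leak.
 */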
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

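/*
 * VIRTIO_GPU_CMD_CTX_CREATE / VIRTIO_GPU_CMD_CTX_DESTROY: rendering
 * contexts map 1:1 onto virglrenderer contexts; the guest-provided
 * debug name is passed through to ease debugging.
 */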
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

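/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: flushing translates into a
 * dpy_gl_update() on every scanout that currently shows the flushed
 * resource; the pixel data itself already lives on the host GPU.
 */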
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

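/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: associate a resource with a display.
 * Rather than copying pixels, the scanout is backed directly by the
 * GL texture behind the resource (dpy_gl_scanout_texture); a zero
 * resource_id disables the scanout.
 */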
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
                               info.flags & 1 /* FIXME: Y_0_TOP */,
                               info.width, info.height,
                               ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

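/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: copy the command stream out of the guest's
 * scatter list and hand it to virglrenderer.  cs.size is in bytes,
 * while virgl_renderer_submit_cmd() takes a dword count, hence the
 * division by four.
 */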
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)\n",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

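/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: a 2D transfer is expressed as a
 * depth-1 3D write on context 0, level 0, so the same virglrenderer
 * entry point serves both the 2D and 3D paths.
 */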
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

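/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D / _FROM_HOST_3D: move data
 * between the attached guest backing store and the host resource, in
 * the direction named by the command.
 */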
static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

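/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING / _DETACH_BACKING: translate
 * the guest page list into an iovec array and hand it to
 * virglrenderer, which reads from and writes into guest memory
 * directly during transfers.
 */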
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, &att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);

    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

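/*
 * VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE / _DETACH_RESOURCE: make a
 * resource visible to, or hide it from, a rendering context.
 */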
static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

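/*
 * VIRTIO_GPU_CMD_GET_CAPSET_INFO: capset_index 0 is the original VIRGL
 * capability set and index 1 the extended VIRGL2 set; any other index
 * yields an empty (version 0, size 0) reply.
 */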
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

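/*
 * VIRTIO_GPU_CMD_GET_CAPSET: the reply is variable-sized, so allocate
 * it from the maximum size virglrenderer reports for the requested
 * capset.
 */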
static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

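/*
 * Main control queue dispatcher.  While the renderer is blocked the
 * command is left pending (cmd->waiting) and retried later.  Unfenced
 * commands are completed immediately; fenced ones are completed from
 * virgl_write_fence() once the fence signals.
 */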
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;

    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

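/*
 * Fence callback from virglrenderer: retire every queued command whose
 * fence_id is at or below the signalled fence.
 */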
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * The guest can end up emitting fences out of order,
         * so we should check all fenced cmds, not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

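/*
 * OpenGL context callbacks: virglrenderer does not create GL contexts
 * itself, it delegates creation, destruction and activation to QEMU's
 * display layer via these hooks.
 */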
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version            = 1,
    .write_fence        = virgl_write_fence,
    .create_gl_context  = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current       = virgl_make_context_current,
};

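/*
 * Periodic (1 Hz) dump of request statistics to stderr, only armed
 * when statistics collection is enabled on the device.
 */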
static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

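/*
 * Fence completion is poll-based: call virgl_renderer_poll() from a
 * timer and re-arm it every 10 ms while commands or fences are still
 * in flight.
 */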
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

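/*
 * Device reset: disable every scanout.  Whether virglrenderer state
 * should be reset as well is an open question (see the comment below).
 */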
void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout_disable(g->scanout[i].con);
    }
}

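/*
 * Called by the display code to block/unblock the renderer around
 * asynchronous GL operations; queued commands are replayed once the
 * last blocker goes away.
 */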
void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

623 | int virtio_gpu_virgl_init(VirtIOGPU *g) |
624 | { | |
625 | int ret; | |
626 | ||
627 | ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs); | |
628 | if (ret != 0) { | |
629 | return ret; | |
630 | } | |
631 | ||
632 | g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, | |
633 | virtio_gpu_fence_poll, g); | |
634 | ||
635 | if (virtio_gpu_stats_enabled(g->conf)) { | |
636 | g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, | |
637 | virtio_gpu_print_stats, g); | |
638 | timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); | |
639 | } | |
640 | return 0; | |
641 | } | |
642 | ||
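/*
 * Advertise two capability sets when virglrenderer reports a non-zero
 * maximum version for VIRGL2, otherwise only the original VIRGL set.
 */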
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g)
{
    uint32_t capset2_max_ver, capset2_max_size;
    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}

#endif /* CONFIG_VIRGL */