/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <[email protected]>
 *     Gerd Hoffmann <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"

#ifdef CONFIG_VIRGL

#include <virglrenderer.h>

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs;

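/*
 * Create a host 2D resource. The guest supplies only format and size;
 * the fixed target and bind values appear to correspond to
 * virglrenderer's 2D-texture target and render-target bind flag.
 */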
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

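/* Create a host 3D resource; all parameters come from the guest. */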
static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}

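/* Drop the renderer's reference to a resource. */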
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    virgl_renderer_resource_unref(unref.resource_id);
}

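/* Create a guest rendering context with the supplied debug name. */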
static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

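/* Destroy a guest rendering context. */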
static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->scanout[idx].con, x, y, width, height);
}

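/*
 * Propagate a flushed rectangle to every scanout currently showing
 * the flushed resource.
 */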
static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}

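/*
 * Attach a resource to a scanout, or disable the scanout when
 * resource_id is 0. The resource's GL texture is handed straight to
 * the display via dpy_gl_scanout(), so no copy is involved.
 */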
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->enable = 1;

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
        if (ret == -1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id,
                       info.flags & 1 /* FIXME: Y_0_TOP */,
                       info.width, info.height,
                       ss.r.x, ss.r.y, ss.r.width, ss.r.height);
    } else {
        if (ss.scanout_id != 0) {
            dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
        }
        dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false,
                       0, 0, 0, 0, 0, 0);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}

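/*
 * Copy the guest's command stream out of the request's scatter-gather
 * list and submit it to the renderer. Note cs.size is in bytes while
 * virgl_renderer_submit_cmd() takes a dword count, hence cs.size / 4.
 */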
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

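/*
 * The 2D transfer is implemented as a degenerate 3D transfer on
 * context 0: z = 0, depth = 1, no stride overrides.
 */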
static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

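/*
 * Attach guest memory as backing storage: map the guest pages named in
 * the request into an iovec and hand it to the renderer. Detach undoes
 * the mapping and releases the iovec.
 */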
static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, NULL, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                       res_iovs, att_rb.nr_entries);
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}

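/*
 * Capability sets: only index 0 (VIRTIO_GPU_CAPSET_VIRGL) is
 * advertised; other indices report version and size 0.
 */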
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;
    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

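/*
 * Control-queue dispatcher for the 3D path. Decodes the header, runs
 * the handler, then responds immediately unless the command carries a
 * fence, in which case the response is deferred to virgl_write_fence().
 */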
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    cmd->waiting = g->renderer_blocked;
    if (cmd->waiting) {
        return;
    }

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;

    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}

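/*
 * Fence callback, invoked by the renderer when a fence completes:
 * retire every queued command whose fence_id is not newer than the
 * signalled fence.
 */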
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

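/*
 * GL context callbacks: virglrenderer delegates context creation,
 * destruction and make-current to QEMU's display layer.
 */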
static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version            = 1,
    .write_fence        = virgl_write_fence,
    .create_gl_context  = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current       = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

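/*
 * The renderer offers no completion events of its own, so fences are
 * polled: the timer re-arms every 10 ms while commands or fences are
 * still pending.
 */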
static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    int i;

    /* virgl_renderer_reset() ??? */
    for (i = 0; i < g->conf.max_outputs; i++) {
        if (i != 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
        dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0, 0, 0);
    }
}

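/*
 * One-time renderer setup: register the callbacks above and create the
 * fence-poll timer (plus the stats timer when stats are enabled).
 */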
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;

    ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        return ret;
    }

    g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                 virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->conf)) {
        g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                      virtio_gpu_print_stats, g);
        timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }
    return 0;
}

#endif /* CONFIG_VIRGL */