/*
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
                               + MAX_INLINE_CMD_SIZE             \
                               + MAX_INLINE_RESP_SIZE)
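
/*
 * Resource id allocation: ids are handed out from an IDR so every host
 * resource gets a unique, non-zero handle (idr_alloc() starts at 1).
 */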
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
                                uint32_t *resid)
{
        int handle;

        idr_preload(GFP_KERNEL);
        spin_lock(&vgdev->resource_idr_lock);
        handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
        spin_unlock(&vgdev->resource_idr_lock);
        idr_preload_end();
        *resid = handle;
}

void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
        spin_lock(&vgdev->resource_idr_lock);
        idr_remove(&vgdev->resource_idr, id);
        spin_unlock(&vgdev->resource_idr_lock);
}
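
/*
 * Virtqueue callbacks run in interrupt context, so they only kick the
 * per-queue dequeue work; the actual buffer reclaim happens in process
 * context from the workqueue.
 */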
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
        struct drm_device *dev = vq->vdev->priv;
        struct virtio_gpu_device *vgdev = dev->dev_private;

        schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
        vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
                                         VBUFFER_SIZE,
                                         __alignof__(struct virtio_gpu_vbuffer),
                                         0, NULL);
        if (!vgdev->vbufs)
                return -ENOMEM;
        return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
        kmem_cache_destroy(vgdev->vbufs);
        vgdev->vbufs = NULL;
}
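
/*
 * Each vbuffer is allocated from the "virtio-gpu-vbufs" slab with room
 * for a small command (MAX_INLINE_CMD_SIZE) and a small response
 * (MAX_INLINE_RESP_SIZE) placed inline after the struct itself; larger
 * response buffers must be supplied by the caller.
 */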
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
                    int size, int resp_size, void *resp_buf,
                    virtio_gpu_resp_cb resp_cb)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
        if (!vbuf)
                return ERR_PTR(-ENOMEM);
        memset(vbuf, 0, VBUFFER_SIZE);

        BUG_ON(size > MAX_INLINE_CMD_SIZE);
        vbuf->buf = (void *)vbuf + sizeof(*vbuf);
        vbuf->size = size;

        vbuf->resp_cb = resp_cb;
        vbuf->resp_size = resp_size;
        if (resp_size <= MAX_INLINE_RESP_SIZE)
                vbuf->resp_buf = (void *)vbuf->buf + size;
        else
                vbuf->resp_buf = resp_buf;
        BUG_ON(!vbuf->resp_buf);
        return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_vbuffer **vbuffer_p,
                                  int size)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, size,
                                   sizeof(struct virtio_gpu_ctrl_hdr),
                                   NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
                        struct virtio_gpu_vbuffer **vbuffer_p)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf
                (vgdev, sizeof(struct virtio_gpu_update_cursor),
                 0, NULL, NULL);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
                                       virtio_gpu_resp_cb cb,
                                       struct virtio_gpu_vbuffer **vbuffer_p,
                                       int cmd_size, int resp_size,
                                       void *resp_buf)
{
        struct virtio_gpu_vbuffer *vbuf;

        vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
                                   resp_size, resp_buf, cb);
        if (IS_ERR(vbuf)) {
                *vbuffer_p = NULL;
                return ERR_CAST(vbuf);
        }
        *vbuffer_p = vbuf;
        return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
                      struct virtio_gpu_vbuffer *vbuf)
{
        if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
                kfree(vbuf->resp_buf);
        kfree(vbuf->data_buf);
        kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
        struct virtio_gpu_vbuffer *vbuf;
        unsigned int len;
        int freed = 0;

        while ((vbuf = virtqueue_get_buf(vq, &len))) {
                list_add_tail(&vbuf->list, reclaim_list);
                freed++;
        }
        if (freed == 0)
                DRM_DEBUG("Huh? zero vbufs reclaimed");
}
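
/*
 * Control queue dequeue work: pull completed buffers off the virtqueue,
 * log unexpected response types, track the highest fence id seen, run
 * per-buffer response callbacks and finally signal completed fences.
 */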
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             ctrlq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;
        struct virtio_gpu_ctrl_hdr *resp;
        u64 fence_id = 0;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->ctrlq.qlock);
        do {
                virtqueue_disable_cb(vgdev->ctrlq.vq);
                reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
        spin_unlock(&vgdev->ctrlq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
                if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
                        DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
                if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
                        u64 f = le64_to_cpu(resp->fence_id);

                        if (fence_id > f) {
                                DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
                                          __func__, fence_id, f);
                        } else {
                                fence_id = f;
                        }
                }
                if (entry->resp_cb)
                        entry->resp_cb(vgdev, entry);

                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->ctrlq.ack_queue);

        if (fence_id)
                virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
        struct virtio_gpu_device *vgdev =
                container_of(work, struct virtio_gpu_device,
                             cursorq.dequeue_work);
        struct list_head reclaim_list;
        struct virtio_gpu_vbuffer *entry, *tmp;

        INIT_LIST_HEAD(&reclaim_list);
        spin_lock(&vgdev->cursorq.qlock);
        do {
                virtqueue_disable_cb(vgdev->cursorq.vq);
                reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
        } while (!virtqueue_enable_cb(vgdev->cursorq.vq));
        spin_unlock(&vgdev->cursorq.qlock);

        list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
                list_del(&entry->list);
                free_vbuf(vgdev, entry);
        }
        wake_up(&vgdev->cursorq.ack_queue);
}
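
/*
 * Queue a buffer on the control virtqueue: one out-descriptor for the
 * command, an optional out-descriptor for extra data and an optional
 * in-descriptor for the response.  If the ring is full the qlock is
 * dropped and the caller sleeps until space is reclaimed, which is why
 * the sparse annotations below mark the lock as released/reacquired.
 */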
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
                __releases(&vgdev->ctrlq.qlock)
                __acquires(&vgdev->ctrlq.qlock)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        struct scatterlist *sgs[3], vcmd, vout, vresp;
        int outcnt = 0, incnt = 0;
        int ret;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&vcmd, vbuf->buf, vbuf->size);
        sgs[outcnt + incnt] = &vcmd;
        outcnt++;

        if (vbuf->data_size) {
                sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
                sgs[outcnt + incnt] = &vout;
                outcnt++;
        }

        if (vbuf->resp_size) {
                sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
                sgs[outcnt + incnt] = &vresp;
                incnt++;
        }

retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }

        if (!ret)
                ret = vq->num_free;
        return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_vbuffer *vbuf)
{
        int rc;

        spin_lock(&vgdev->ctrlq.qlock);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
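
/*
 * Fenced submission: the fence id must be assigned and the buffer queued
 * within the same qlock critical section, and a command needs at most
 * three descriptors (command, data, response), hence the reservation of
 * three free ring slots before emitting the fence.
 */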
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf,
                                               struct virtio_gpu_ctrl_hdr *hdr,
                                               struct virtio_gpu_fence **fence)
{
        struct virtqueue *vq = vgdev->ctrlq.vq;
        int rc;

again:
        spin_lock(&vgdev->ctrlq.qlock);

        /*
         * Make sure we have enough space in the virtqueue.  If not,
         * wait here until we have.
         *
         * Without that virtio_gpu_queue_ctrl_buffer_locked might have
         * to wait for free space, which can result in fence ids being
         * submitted out-of-order.
         */
        if (vq->num_free < 3) {
                spin_unlock(&vgdev->ctrlq.qlock);
                wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
                goto again;
        }

        if (fence)
                virtio_gpu_fence_emit(vgdev, hdr, fence);
        rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
        spin_unlock(&vgdev->ctrlq.qlock);
        return rc;
}
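
/*
 * The cursor queue takes a single out-descriptor per update and uses the
 * same wait-and-retry strategy as the control queue when the ring is full.
 */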
static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
{
        struct virtqueue *vq = vgdev->cursorq.vq;
        struct scatterlist *sgs[1], ccmd;
        int ret;
        int outcnt;

        if (!vgdev->vqs_ready)
                return -ENODEV;

        sg_init_one(&ccmd, vbuf->buf, vbuf->size);
        sgs[0] = &ccmd;
        outcnt = 1;

        spin_lock(&vgdev->cursorq.qlock);
retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
                wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
                virtqueue_kick(vq);
        }
        spin_unlock(&vgdev->cursorq.qlock);

        if (!ret)
                ret = vq->num_free;
        return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    uint32_t resource_id,
                                    uint32_t format,
                                    uint32_t width,
                                    uint32_t height)
{
        struct virtio_gpu_resource_create_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->format = cpu_to_le32(format);
        cmd_p->width = cpu_to_le32(width);
        cmd_p->height = cpu_to_le32(height);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id)
{
        struct virtio_gpu_resource_unref *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
                                                  uint32_t resource_id,
                                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_detach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
                                uint32_t x, uint32_t y)
{
        struct virtio_gpu_set_scanout *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->scanout_id = cpu_to_le32(scanout_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id,
                                   uint32_t x, uint32_t y,
                                   uint32_t width, uint32_t height)
{
        struct virtio_gpu_resource_flush *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->r.width = cpu_to_le32(width);
        cmd_p->r.height = cpu_to_le32(height);
        cmd_p->r.x = cpu_to_le32(x);
        cmd_p->r.y = cpu_to_le32(y);

        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
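
/*
 * When the transport does not have the IOMMU quirk the driver goes
 * through the DMA API, so the backing pages have to be synced for the
 * device before the host is asked to read them.
 */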
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint64_t offset,
                                        __le32 width, __le32 height,
                                        __le32 x, __le32 y,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_to_host_2d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->r.width = width;
        cmd_p->r.height = height;
        cmd_p->r.x = x;
        cmd_p->r.y = y;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
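
/*
 * Attach backing pages to a host resource.  The mem entry array is handed
 * to the vbuffer as data_buf, so it is freed by free_vbuf() once the host
 * has consumed the command.
 */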
static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
                                       uint32_t resource_id,
                                       struct virtio_gpu_mem_entry *ents,
                                       uint32_t nents,
                                       struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_attach_backing *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->nr_entries = cpu_to_le32(nents);

        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
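
/*
 * The response callbacks below run from the control queue dequeue work
 * with the answer already in resp_buf; they update driver state and wake
 * anyone sleeping on resp_wq.
 */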
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
                                               struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_resp_display_info *resp =
                (struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
        int i;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_scanouts; i++) {
                vgdev->outputs[i].info = resp->pmodes[i];
                if (resp->pmodes[i].enabled) {
                        DRM_DEBUG("output %d: %dx%d+%d+%d", i,
                                  le32_to_cpu(resp->pmodes[i].r.width),
                                  le32_to_cpu(resp->pmodes[i].r.height),
                                  le32_to_cpu(resp->pmodes[i].r.x),
                                  le32_to_cpu(resp->pmodes[i].r.y));
                } else {
                        DRM_DEBUG("output %d: disabled", i);
                }
        }

        vgdev->display_info_pending = false;
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);

        if (!drm_helper_hpd_irq_event(vgdev->ddev))
                drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
                                              struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset_info *cmd =
                (struct virtio_gpu_get_capset_info *)vbuf->buf;
        struct virtio_gpu_resp_capset_info *resp =
                (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
        int i = le32_to_cpu(cmd->capset_index);

        spin_lock(&vgdev->display_info_lock);
        vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
        vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
        vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
                                     struct virtio_gpu_vbuffer *vbuf)
{
        struct virtio_gpu_get_capset *cmd =
                (struct virtio_gpu_get_capset *)vbuf->buf;
        struct virtio_gpu_resp_capset *resp =
                (struct virtio_gpu_resp_capset *)vbuf->resp_buf;
        struct virtio_gpu_drv_cap_cache *cache_ent;

        spin_lock(&vgdev->display_info_lock);
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
                    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
                        memcpy(cache_ent->caps_cache, resp->capset_data,
                               cache_ent->size);
                        atomic_set(&cache_ent->is_valid, 1);
                        break;
                }
        }
        spin_unlock(&vgdev->display_info_lock);
        wake_up(&vgdev->resp_wq);
}
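
/*
 * Display info and capset queries are asynchronous: the command is queued
 * together with a response buffer and a callback, and callers wait on
 * resp_wq (or check the cache entry's is_valid flag for capsets) for the
 * reply to arrive.
 */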
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
        struct virtio_gpu_ctrl_hdr *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        vgdev->display_info_pending = true;
        cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
        struct virtio_gpu_get_capset_info *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        void *resp_buf;

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
                           GFP_KERNEL);
        if (!resp_buf)
                return -ENOMEM;

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
                 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
                 resp_buf);
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
        cmd_p->capset_index = cpu_to_le32(idx);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
                              int idx, int version,
                              struct virtio_gpu_drv_cap_cache **cache_p)
{
        struct virtio_gpu_get_capset *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        int max_size;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *resp_buf;

        if (idx >= vgdev->num_capsets)
                return -EINVAL;

        if (version > vgdev->capsets[idx].max_version)
                return -EINVAL;

        cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
        if (!cache_ent)
                return -ENOMEM;

        max_size = vgdev->capsets[idx].max_size;
        cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
        if (!cache_ent->caps_cache) {
                kfree(cache_ent);
                return -ENOMEM;
        }

        resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
                           GFP_KERNEL);
        if (!resp_buf) {
                kfree(cache_ent->caps_cache);
                kfree(cache_ent);
                return -ENOMEM;
        }

        cache_ent->version = version;
        cache_ent->id = vgdev->capsets[idx].id;
        atomic_set(&cache_ent->is_valid, 0);
        cache_ent->size = max_size;
        spin_lock(&vgdev->display_info_lock);
        list_add_tail(&cache_ent->head, &vgdev->cap_cache);
        spin_unlock(&vgdev->display_info_lock);

        cmd_p = virtio_gpu_alloc_cmd_resp
                (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
                 sizeof(struct virtio_gpu_resp_capset) + max_size,
                 resp_buf);
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
        cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
        cmd_p->capset_version = cpu_to_le32(version);
        *cache_p = cache_ent;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

        return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
                                   uint32_t nlen, const char *name)
{
        struct virtio_gpu_ctx_create *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        cmd_p->nlen = cpu_to_le32(nlen);
        strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
        cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
                                    uint32_t id)
{
        struct virtio_gpu_ctx_destroy *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
        cmd_p->hdr.ctx_id = cpu_to_le32(id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
                                            uint32_t ctx_id,
                                            uint32_t resource_id)
{
        struct virtio_gpu_ctx_resource *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_resource_create_3d *rc_3d,
                                  struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_resource_create_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        *cmd_p = *rc_3d;
        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
        cmd_p->hdr.flags = 0;

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        struct virtio_gpu_object *bo,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
                                        struct virtio_gpu_box *box,
                                        struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

        if (use_dma_api)
                dma_sync_sg_for_device(vgdev->vdev->dev.parent,
                                       bo->pages->sgl, bo->pages->nents,
                                       DMA_TO_DEVICE);

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t resource_id, uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
                                          struct virtio_gpu_box *box,
                                          struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->resource_id = cpu_to_le32(resource_id);
        cmd_p->box = *box;
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
                           void *data, uint32_t data_size,
                           uint32_t ctx_id, struct virtio_gpu_fence **fence)
{
        struct virtio_gpu_cmd_submit *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;

        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));

        vbuf->data_buf = data;
        vbuf->data_size = data_size;

        cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
        cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
        cmd_p->size = cpu_to_le32(data_size);

        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
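
/*
 * Attach a GEM object's backing pages to its host resource: build the
 * scatter/gather table if needed, map it through the DMA API when that
 * is in use, and translate it into virtio_gpu_mem_entry records for the
 * ATTACH_BACKING command.
 */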
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             uint32_t resource_id,
                             struct virtio_gpu_fence **fence)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_mem_entry *ents;
        struct scatterlist *sg;
        int si, nents;

        if (!obj->pages) {
                int ret;

                ret = virtio_gpu_object_get_sg_table(vgdev, obj);
                if (ret)
                        return ret;
        }

        if (use_dma_api) {
                obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
                                         obj->pages->sgl, obj->pages->nents,
                                         DMA_TO_DEVICE);
                nents = obj->mapped;
        } else {
                nents = obj->pages->nents;
        }

        /* gets freed when the ring has consumed it */
        ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
                             GFP_KERNEL);
        if (!ents) {
                DRM_ERROR("failed to allocate ent list\n");
                return -ENOMEM;
        }

        for_each_sg(obj->pages->sgl, sg, nents, si) {
                ents[si].addr = cpu_to_le64(use_dma_api
                                            ? sg_dma_address(sg)
                                            : sg_phys(sg));
                ents[si].length = cpu_to_le32(sg->length);
                ents[si].padding = 0;
        }

        virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
                                               ents, nents,
                                               fence);
        obj->hw_res_handle = resource_id;
        return 0;
}
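
/*
 * Detaching has to be ordered against the host: when the DMA API is in
 * use the detach command is fenced and waited on before the scatterlist
 * is unmapped, so the host cannot touch pages that are already gone.
 */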
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
                              struct virtio_gpu_object *obj)
{
        bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
        struct virtio_gpu_fence *fence;

        if (use_dma_api && obj->mapped) {
                /* detach backing and wait for the host to process it ... */
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
                dma_fence_wait(&fence->f, true);
                dma_fence_put(&fence->f);

                /* ... then tear down iommu mappings */
                dma_unmap_sg(vgdev->vdev->dev.parent,
                             obj->pages->sgl, obj->mapped,
                             DMA_TO_DEVICE);
                obj->mapped = 0;
        } else {
                virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
        }
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
{
        struct virtio_gpu_vbuffer *vbuf;
        struct virtio_gpu_update_cursor *cur_p;

        output->cursor.pos.scanout_id = cpu_to_le32(output->index);
        cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
        memcpy(cur_p, &output->cursor, sizeof(output->cursor));
        virtio_gpu_queue_cursor(vgdev, vbuf);
}