// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
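/*
 * Overview of the path below: the ioctl copies in the bo and cmd tables
 * (submit_lookup_objects()/submit_lookup_cmds()), ww-locks every bo
 * (submit_lock_objects()), resolves implicit-sync dependencies
 * (submit_fence_sync()), pins backing pages and iovas
 * (submit_pin_objects()), patches relocs for any bo whose presumed
 * address went stale (submit_reloc()), and finally arms and pushes the
 * scheduler job.
 */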
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu,
		struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
		uint32_t nr_cmds)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_gem_submit *submit;
	uint64_t sz;
	int ret;

	sz = struct_size(submit, bos, nr_bos) +
			((u64)nr_cmds * sizeof(submit->cmd[0]));
	if (sz > SIZE_MAX)
		return ERR_PTR(-ENOMEM);

	submit = kzalloc(sz, GFP_KERNEL);
	if (!submit)
		return ERR_PTR(-ENOMEM);

	ret = drm_sched_job_init(&submit->base, queue->entity, queue);
	if (ret) {
		kfree(submit);
		return ERR_PTR(ret);
	}

	kref_init(&submit->ref);
	submit->dev = dev;
	submit->aspace = queue->ctx->aspace;
	submit->gpu = gpu;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->pid = get_pid(task_pid(current));
	submit->ring = gpu->rb[queue->ring_nr];
	submit->fault_dumped = false;

	/* Get a unique identifier for the submission for logging purposes */
	submit->ident = atomic_inc_return(&ident) - 1;

	INIT_LIST_HEAD(&submit->node);

	return submit;
}
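/*
 * Release function for the submit kref; runs once the ioctl, the
 * scheduler, and the retire path have all dropped their references.
 */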
void __msm_gem_submit_destroy(struct kref *kref)
{
	struct msm_gem_submit *submit =
			container_of(kref, struct msm_gem_submit, ref);
	unsigned i;

	if (submit->fence_id) {
		mutex_lock(&submit->queue->idr_lock);
		idr_remove(&submit->queue->fence_idr, submit->fence_id);
		mutex_unlock(&submit->queue->idr_lock);
	}

	dma_fence_put(submit->user_fence);
	dma_fence_put(submit->hw_fence);

	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	for (i = 0; i < submit->nr_cmds; i++)
		kfree(submit->cmd[i].relocs);

	kfree(submit);
}
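/*
 * The bo table is copied and validated in two passes: first everything is
 * copied from userspace without holding any locks, then all the handles
 * are resolved to GEM objects in one go under table_lock.
 */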
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
		    !(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = to_msm_bo(obj);
	}

out_unlock:
	spin_unlock(&file->table_lock);
out:
	submit->nr_bos = i;
	return ret;
}
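/*
 * Copy in the cmd descriptors and their reloc tables.  Sizes and offsets
 * are converted from bytes to dwords here, which is what the rest of the
 * submit path works in.
 */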
static int submit_lookup_cmds(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	size_t sz;
	int ret = 0;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].offset = submit_cmd.submit_offset / 4;
		submit->cmd[i].idx = submit_cmd.submit_idx;
		submit->cmd[i].nr_relocs = submit_cmd.nr_relocs;

		userptr = u64_to_user_ptr(submit_cmd.relocs);

		sz = array_size(submit_cmd.nr_relocs,
				sizeof(struct drm_msm_gem_submit_reloc));
		/* check for overflow: */
		if (sz == SIZE_MAX) {
			ret = -ENOMEM;
			goto out;
		}
		submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
		if (!submit->cmd[i].relocs) {
			ret = -ENOMEM;
			goto out;
		}
		ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	return ret;
}
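/*
 * bos[i].flags carries both the userspace-visible MSM_SUBMIT_BO_* flags
 * and kernel-internal state bits (BO_LOCKED, BO_OBJ_PINNED, BO_VMA_PINNED,
 * BO_VALID) tracking how far each bo got, so that cleanup can unwind
 * exactly what was done.
 */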
/* Unwind bo state, according to cleanup_flags.  In the success case, only
 * the lock is dropped at the end of the submit (and active/pin ref is dropped
 * later when the submit is retired).
 */
static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
		unsigned cleanup_flags)
{
	struct drm_gem_object *obj = &submit->bos[i].obj->base;
	unsigned flags = submit->bos[i].flags & cleanup_flags;

	/*
	 * Clear flags bit before dropping lock, so that the msm_job_run()
	 * path isn't racing with submit_cleanup() (ie. the read/modify/
	 * write is protected by the obj lock in all paths)
	 */
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_VMA_PINNED)
		msm_gem_unpin_vma(submit->bos[i].vma);

	if (flags & BO_OBJ_PINNED)
		msm_gem_unpin_locked(obj);

	if (flags & BO_LOCKED)
		dma_resv_unlock(obj->resv);
}
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | BO_LOCKED;

	submit_cleanup_bo(submit, i, cleanup_flags);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;
}
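/*
 * Locking follows the ww-mutex protocol: all reservations are taken under
 * one acquire ticket, and on -EDEADLK we back off completely, slow-lock
 * the contended bo, and retry, which guarantees forward progress between
 * racing submits.
 */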
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = dma_resv_lock_interruptible(msm_obj->base.resv,
							  &submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	if (ret == -EALREADY) {
		DRM_ERROR("handle %u at index %u already on submit list\n",
				submit->bos[i].handle, i);
		ret = -EINVAL;
	}

	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = dma_resv_lock_slow_interruptible(msm_obj->base.resv,
						       &submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}

		/* Not expecting -EALREADY here, if the bo was already
		 * locked, we should have gotten -EALREADY already from
		 * the dma_resv_lock_interruptible() call.
		 */
		WARN_ON_ONCE(ret == -EALREADY);
	}

	return ret;
}
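/*
 * Implicit sync: unless disabled, fences already attached to a bo's
 * dma_resv become scheduler dependencies, so this job does not run until
 * earlier readers/writers of the buffer have finished.
 */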
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		/* NOTE: _reserve_shared() must happen before
		 * _add_shared_fence(), which makes this a slightly
		 * strange place to call it.  OTOH this is a
		 * convenient can-fail point to hook it in.
		 */
		ret = dma_resv_reserve_fences(obj->resv, 1);
		if (ret)
			return ret;

		/* If userspace has determined that explicit fencing is
		 * used, it can disable implicit sync on the entire
		 * submit:
		 */
		if (no_implicit)
			continue;

		/* Otherwise userspace can ask for implicit sync to be
		 * disabled on specific buffers.  This is useful for internal
		 * usermode driver managed buffers, suballocation, etc.
		 */
		if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
			continue;

		ret = drm_sched_job_add_implicit_dependencies(&submit->base,
							      obj, write);
		if (ret)
			break;
	}

	return ret;
}
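/*
 * Userspace passes a "presumed" iova for each bo.  If the pinned vma
 * still matches, the bo is marked BO_VALID and relocs against it can be
 * skipped; otherwise the submit as a whole falls back to the reloc path.
 */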
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		struct msm_gem_vma *vma;

		/* if locking succeeded, pin bo: */
		vma = msm_gem_get_vma_locked(obj, submit->aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			break;
		}

		ret = msm_gem_pin_vma_locked(obj, vma);
		if (ret)
			break;

		submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED;
		submit->bos[i].vma = vma;

		if (vma->iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = vma->iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
static void submit_attach_object_fences(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_WRITE);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			dma_resv_add_fence(obj->resv, submit->user_fence,
					   DMA_RESV_USAGE_READ);
	}
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
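/*
 * Reloc entries let userspace emit a cmdstream before final GPU addresses
 * are known: each entry names a bo index, an offset to add, an optional
 * shift (positive = left, negative = right, e.g. to split a 64-bit iova
 * across two dwords) and a constant to OR in.
 */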
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably need
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc = relocs[i];
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}
/* Cleanup submit at end of ioctl.  In the error case, this also drops
 * references, unpins, and drops active refcnt.  In the non-error case,
 * this is done when the submit is retired.
 */
static void submit_cleanup(struct msm_gem_submit *submit, bool error)
{
	unsigned cleanup_flags = BO_LOCKED;
	unsigned i;

	if (error)
		cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_cleanup_bo(submit, i, cleanup_flags);
		if (error)
			drm_gem_object_put(&msm_obj->base);
	}
}
void msm_submit_retire(struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		drm_gem_object_put(obj);
	}
}
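/*
 * A post-dep is a syncobj to signal once the submit's fence exists; for
 * timeline syncobjs the new point is attached via a pre-allocated
 * dma_fence_chain, so the signalling path itself cannot fail on
 * allocation.
 */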
struct msm_submit_post_dep {
	struct drm_syncobj *syncobj;
	uint64_t point;
	struct dma_fence_chain *chain;
};
static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                           struct drm_file *file,
                                           uint64_t in_syncobjs_addr,
                                           uint32_t nr_in_syncobjs,
                                           size_t syncobj_stride,
                                           struct msm_ringbuffer *ring)
{
	struct drm_syncobj **syncobjs = NULL;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
	                   GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!syncobjs)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_in_syncobjs; ++i) {
		uint64_t address = in_syncobjs_addr + i * syncobj_stride;
		struct dma_fence *fence;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.point &&
		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
		                             syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		ret = drm_sched_job_add_dependency(&submit->base, fence);
		if (ret)
			break;

		if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
			syncobjs[i] =
				drm_syncobj_find(file, syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			if (syncobjs[j])
				drm_syncobj_put(syncobjs[j]);
		}
		kfree(syncobjs);
		return ERR_PTR(ret);
	}

	return syncobjs;
}
static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
                               uint32_t nr_syncobjs)
{
	uint32_t i;

	for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}
static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
                                                       struct drm_file *file,
                                                       uint64_t syncobjs_addr,
                                                       uint32_t nr_syncobjs,
                                                       size_t syncobj_stride)
{
	struct msm_submit_post_dep *post_deps;
	struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
	int ret = 0;
	uint32_t i, j;

	/* zero-initialize, so the error path below can safely free/put
	 * entries that were never filled in:
	 */
	post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps),
	                    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!post_deps)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_syncobjs; ++i) {
		uint64_t address = syncobjs_addr + i * syncobj_stride;

		if (copy_from_user(&syncobj_desc,
			           u64_to_user_ptr(address),
			           min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;
		post_deps[i].chain = NULL;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			if (!drm_core_check_feature(dev,
			                            DRIVER_SYNCOBJ_TIMELINE)) {
				ret = -EOPNOTSUPP;
				break;
			}

			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj =
			drm_syncobj_find(file, syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		for (j = 0; j <= i; ++j) {
			dma_fence_chain_free(post_deps[j].chain);
			if (post_deps[j].syncobj)
				drm_syncobj_put(post_deps[j].syncobj);
		}

		kfree(post_deps);
		return ERR_PTR(ret);
	}

	return post_deps;
}
static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
                                  uint32_t count, struct dma_fence *fence)
{
	uint32_t i;

	for (i = 0; post_deps && i < count; ++i) {
		if (post_deps[i].chain) {
			drm_syncobj_add_point(post_deps[i].syncobj,
			                      post_deps[i].chain,
			                      fence, post_deps[i].point);
			post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(post_deps[i].syncobj,
			                          fence);
		}
	}
}
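/*
 * The ioctl itself: validate flags and caps, resolve the submitqueue,
 * create the submit, parse in/out syncobjs, then look up bos and cmds
 * before taking the ww ticket (copy_from_user() with the ticket held
 * upsets lockdep).  Errors unwind through the out labels at the bottom.
 */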
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	struct msm_submit_post_dep *post_deps = NULL;
	struct drm_syncobj **syncobjs_to_reset = NULL;
	int out_fence_fd = -1;
	bool has_ww_ticket = false;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	if (args->pad)
		return -EINVAL;

	if (unlikely(!ctx->aspace) && !capable(CAP_SYS_RAWIO)) {
		DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n");
		return -EPERM;
	}

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->ring_nr];

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			msm_submitqueue_put(queue);
			return ret;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (IS_ERR(submit)) {
		/* drop the fd and queue ref taken above: */
		if (out_fence_fd >= 0)
			put_unused_fd(out_fence_fd);
		msm_submitqueue_put(queue);
		return PTR_ERR(submit);
	}

	trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
		args->nr_bos, args->nr_cmds);

	ret = mutex_lock_interruptible(&queue->lock);
	if (ret)
		goto out_post_unlock;
	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = drm_sched_job_add_dependency(&submit->base, in_fence);
		if (ret)
			goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
		syncobjs_to_reset = msm_parse_deps(submit, file,
		                                   args->in_syncobjs,
		                                   args->nr_in_syncobjs,
		                                   args->syncobj_stride, ring);
		if (IS_ERR(syncobjs_to_reset)) {
			ret = PTR_ERR(syncobjs_to_reset);
			goto out_unlock;
		}
	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
		post_deps = msm_parse_post_deps(dev, file,
		                                args->out_syncobjs,
		                                args->nr_out_syncobjs,
		                                args->syncobj_stride);
		if (IS_ERR(post_deps)) {
			ret = PTR_ERR(post_deps);
			goto out_unlock;
		}
	}

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lookup_cmds(submit, args, file);
	if (ret)
		goto out;

	/* copy_*_user while holding a ww ticket upsets lockdep */
	ww_acquire_init(&submit->ticket, &reservation_ww_class);
	has_ww_ticket = true;
	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;
	for (i = 0; i < args->nr_cmds; i++) {
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = submit_bo(submit, submit->cmd[i].idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (!submit->cmd[i].size ||
		    ((submit->cmd[i].size + submit->cmd[i].offset) >
				msm_obj->base.size / 4)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].iova = iova + (submit->cmd[i].offset * 4);

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit->cmd[i].offset * 4,
				submit->cmd[i].nr_relocs, submit->cmd[i].relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;
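	/*
	 * Fence bookkeeping: queue->fence_idr maps a small per-queue seqno
	 * (either supplied by userspace via FENCE_SN_IN or allocated
	 * cyclically) to the scheduler's finished fence, which is what the
	 * WAIT_FENCE ioctl later looks up.
	 */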
	mutex_lock(&queue->idr_lock);

	/*
	 * If using userspace provided seqno fence, validate that the id
	 * is available before arming sched job.  Since access to fence_idr
	 * is serialized on the queue's idr_lock, the slot should still be
	 * available after the job is armed:
	 */
	if ((args->flags & MSM_SUBMIT_FENCE_SN_IN) &&
			idr_find(&queue->fence_idr, args->fence)) {
		mutex_unlock(&queue->idr_lock);
		ret = -EINVAL;
		goto out;
	}

	drm_sched_job_arm(&submit->base);

	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);

	if (args->flags & MSM_SUBMIT_FENCE_SN_IN) {
		/*
		 * Userspace has assigned the seqno fence that it wants
		 * us to use.  It is an error to pick a fence sequence
		 * number that is not available:
		 */
		submit->fence_id = args->fence;
		ret = idr_alloc_u32(&queue->fence_idr, submit->user_fence,
				    &submit->fence_id, submit->fence_id,
				    GFP_KERNEL);
		/*
		 * We've already validated that the fence_id slot is
		 * available, so if idr_alloc_u32() failed it is a kernel bug:
		 */
		WARN_ON(ret);
	} else {
		/*
		 * Allocate an id which can be used by WAIT_FENCE ioctl to map
		 * back to the underlying fence:
		 */
		submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
						    submit->user_fence, 1,
						    INT_MAX, GFP_KERNEL);
	}

	mutex_unlock(&queue->idr_lock);

	if (submit->fence_id < 0) {
		ret = submit->fence_id;
		submit->fence_id = 0;
	}
	if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		struct sync_file *sync_file = sync_file_create(submit->user_fence);
		if (!sync_file) {
			ret = -ENOMEM;
		} else {
			fd_install(out_fence_fd, sync_file->file);
			args->fence_fd = out_fence_fd;
		}
	}

	submit_attach_object_fences(submit);

	/* The scheduler owns a ref now: */
	msm_gem_submit_get(submit);

	drm_sched_entity_push_job(&submit->base);

	args->fence = submit->fence_id;
	queue->last_fence = submit->fence_id;

	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
	                      submit->user_fence);
out:
	submit_cleanup(submit, !!ret);
	if (has_ww_ticket)
		ww_acquire_fini(&submit->ticket);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&queue->lock);
out_post_unlock:
	msm_gem_submit_put(submit);

	if (!IS_ERR_OR_NULL(post_deps)) {
		for (i = 0; i < args->nr_out_syncobjs; ++i) {
			kfree(post_deps[i].chain);
			drm_syncobj_put(post_deps[i].syncobj);
		}
		kfree(post_deps);
	}

	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
		for (i = 0; i < args->nr_in_syncobjs; ++i) {
			if (syncobjs_to_reset[i])
				drm_syncobj_put(syncobjs_to_reset[i]);
		}
		kfree(syncobjs_to_reset);
	}

	return ret;
}