// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

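/*
 * Scheduling properties that can be tuned per exec queue. Each entry maps to
 * a min/max range provided by the hw engine class, see
 * xe_exec_queue_get_prop_minmax() below.
 */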
enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);
	kfree(q);
}

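/*
 * Allocate an exec queue and fill in the defaults: scheduling properties come
 * from the hw engine class and may be overridden by user extensions before
 * the backend is initialized.
 */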
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->compute.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_init(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	if (xe_exec_queue_is_parallel(q)) {
		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}

	return q;
}

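/*
 * Initialize one LRC per queue width and then the submission backend. A
 * runtime PM reference is taken for queues that lack a user VM to hold one.
 */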
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	int i, err;

	for (i = 0; i < q->width; ++i) {
		err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
		if (err)
			goto err_lrc;
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	/*
	 * Normally the user vm holds an rpm ref to keep the device
	 * awake, and the context holds a ref for the vm, however for
	 * some engines we use the kernel's migrate vm underneath which offers no
	 * such rpm ref, or we lack a vm. Make sure we keep a ref here, so we
	 * can perform GuC CT actions when needed. Caller is expected to have
	 * already grabbed the rpm ref outside any sensitive locks.
	 */
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
		xe_pm_runtime_get_noresume(xe);

	return 0;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_finish(q->lrc + i);
	return err;
}

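/*
 * Common exec queue creation path: allocate the queue, then initialize it
 * under the VM lock when a VM is provided.
 */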
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto err_post_alloc;
	}

	err = __xe_exec_queue_init(q);
	if (vm)
		xe_vm_unlock(vm);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);

	return ERR_PTR(err);
}

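/*
 * Create a queue spanning every non-reserved engine of @class on @gt, using
 * the first matching engine as the primary placement.
 */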
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}

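/*
 * Kref release callback: drops the last fence and releases any child bind
 * queues on other GTs before handing the queue to the backend for teardown.
 */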
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

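/*
 * Final teardown once the backend is done with the queue: release the LRCs,
 * drop the runtime PM reference taken at init and free the queue.
 */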
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_finish(q->lrc + i);
	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
		xe_pm_runtime_put(gt_to_xe(q->gt));
	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

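/*
 * Look up an exec queue by id in the file's xarray. On success a reference is
 * taken which the caller must drop with xe_exec_queue_put().
 */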
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

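/*
 * With CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT the scheduling timeouts are
 * always range-checked; otherwise only callers without CAP_SYS_NICE are
 * restricted.
 */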
static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

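/*
 * Return the allowed min/max for a scheduling property. With the
 * schedtimeout-limit config enabled, CAP_SYS_NICE holders get the full
 * hardware range instead of the engine-class range.
 */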
static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}

#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

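/*
 * Handle a DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY extension: copy the
 * property descriptor from userspace, validate it and dispatch through the
 * table above using a speculation-safe index.
 */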
static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

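/*
 * Walk the user extension chain, bounded by MAX_USER_EXTENSIONS, dispatching
 * each node to its handler based on the extension name.
 */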
#define MAX_USER_EXTENSIONS 16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

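/* Map uapi engine class values onto the driver's internal engine classes. */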
static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

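/*
 * Bind (VM_BIND) queues are restricted to a single placement on the copy
 * class; build the logical mask from every non-reserved copy engine on @gt.
 */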
static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;
	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

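/*
 * Validate the user's width x num_placements engine array: every instance
 * must be on the same GT and engine class, and parallel placements must be
 * logically contiguous. Returns the resulting logical mask, or 0 on error.
 */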
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

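/*
 * DRM_IOCTL_XE_EXEC_QUEUE_CREATE: VM_BIND queues are created on every
 * non-media GT against the migration VM, while other queues are placed on the
 * engines described by the user's instance array and bound to the user VM.
 */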
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;
			u32 flags;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration vm doesn't hold rpm ref */
			xe_pm_runtime_get_noresume(xe);

			flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe, flags,
						   args->extensions);

			xe_pm_runtime_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}

			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->compute.context = dma_fence_context_alloc(1);
			spin_lock_init(&q->compute.lock);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}
	}

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc;
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(&q->lrc[i]) !=
			    q->lrc[i].fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(&q->lrc[0]) ==
		q->lrc[0].fence_ctx.next_seqno - 1;
}

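/*
 * Kill a queue and any child bind queues hanging off its multi-GT list,
 * removing each from the VM's compute exec queue tracking.
 */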
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		lockdep_assert_held(&vm->lock);
	else
		xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get the last fence, taking a reference.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference to the fence;
 * xe_exec_queue_last_fence_put() should be called when closing the engine.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}