// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->gt = gt;
	q->hwe = hwe;
	q->flags = flags;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}
/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. Bind exec queue is tied to migration VM
 * for access to physical memory required for page table programming. On a
 * faulting device the reserved copy engine instance must be used to avoid
 * deadlocking (user binds cannot get stuck behind faults as kernel binds which
 * resolve faults depend on user binds). On non-faulting devices any copy engine
 * can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
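
/*
 * Illustrative sketch (not part of the original file): a kernel-side caller
 * typically creates one bind queue per tile for page-table updates and drops
 * its reference with xe_exec_queue_put() when done. The flag choice below is
 * an assumption; real callers pass whatever EXEC_QUEUE_FLAG_* combination
 * their context requires.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create_bind(xe, tile, EXEC_QUEUE_FLAG_VM, 0);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	// ... submit bind jobs on q ...
 *	xe_exec_queue_put(q);
 */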
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 * and wakeup any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}
void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}
static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}
static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}
static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}
static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}
typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}
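
/*
 * Illustrative sketch (not from the original file): the extension chain walked
 * above is built by userspace as a linked list of drm_xe_user_extension
 * headers. A single DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY node setting the
 * timeslice might look roughly like this (field names per uapi/drm/xe_drm.h;
 * the 'create' variable is a hypothetical drm_xe_exec_queue_create):
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.base.next_extension = 0,	// end of chain
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,			// microseconds
 *	};
 *
 *	create.extensions = (uintptr_t)&ext;
 *
 * Each node's next_extension points at the next header, and the recursion
 * above bails out after MAX_USER_EXTENSIONS nodes.
 */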
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}
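
/*
 * Worked example (illustrative, not from the original file): for width == 2
 * and num_placements == 2, eci[] is indexed n = j * width + i, i.e. both
 * slots for placement 0 first, then both slots for placement 1. If slot
 * i == 0 ends up on instances {0, 2} and slot i == 1 on instances {1, 3},
 * then slot 0 produces current_mask == 0b0101 and slot 1 produces
 * 0b1010 == 0b0101 << 1, which satisfies the contiguity check above. A
 * slot 1 placed on instances {2, 3} (mask 0b1100) would be rejected.
 */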
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_gt *gt;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;
			u32 flags = EXEC_QUEUE_FLAG_VM;

			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}
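
/*
 * Illustrative sketch (not from the original file): minimal userspace usage of
 * this ioctl for a single-instance render queue. Struct and ioctl names are
 * taken from uapi/drm/xe_drm.h; 'fd' and 'vm_id' are assumed to come from the
 * caller, and error handling is elided.
 *
 *	struct drm_xe_engine_class_instance instance = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&instance,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *	// on success, create.exec_queue_id holds the new queue id
 */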
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}
/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}
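
/*
 * Worked example (illustrative, not from the original file): if the next job
 * will be assigned seqno 10 and the LRC reports seqno 6 as the last one
 * completed, then jobs 7, 8 and 9 are still in flight: 10 - 6 - 1 == 3. The
 * ring is considered full once that count reaches
 * ring.size / MAX_JOB_SIZE_BYTES.
 */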
/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}
/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them in different time could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}
/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}
/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}
/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}
/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}
/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when
 * closing the engine, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}
/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}
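
/*
 * Illustrative sketch (not from the original file): a bind/exec path typically
 * brackets submission with the helpers above while holding the locks checked
 * by xe_exec_queue_last_fence_lockdep_assert():
 *
 *	fence = xe_exec_queue_last_fence_get(q, vm);	// ref'd; stub if signaled
 *	// ... use as a scheduler/job dependency ...
 *	dma_fence_put(fence);
 *
 *	// after submitting a new job whose fence is 'new_fence':
 *	xe_exec_queue_last_fence_set(q, vm, new_fence);	// takes its own ref
 */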