linux.git: drivers/gpu/drm/xe/xe_exec_queue.c (Linux 6.14-rc3)

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

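/*
 * Allocate an exec queue and fill in its software state: refcount, engine
 * class defaults for the scheduling properties, and any user extensions.
 * Hardware-facing state (the LRCs and the submission backend) is set up
 * later in __xe_exec_queue_init().
 */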
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

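/*
 * Create the hardware-facing state for an already-allocated queue: one LRC
 * per queue width (created while holding the VM lock when a VM is attached),
 * followed by the submission backend's init hook. On failure, any LRCs that
 * were already created are released again.
 */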
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlocking (user binds must not get stuck behind faults,
 * since kernel binds that resolve faults depend on user binds). On
 * non-faulting devices any copy engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

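/*
 * kref release callback, invoked when the queue's refcount drops to zero:
 * release the last fence, drop the references held on any child bind queues
 * on other GTs, and hand the remaining teardown to the submission backend's
 * fini() hook.
 */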
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 * and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

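/*
 * Look up an exec queue by user-visible id and take a reference on it; the
 * caller is expected to drop that reference with xe_exec_queue_put().
 */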
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

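/*
 * Only tasks with CAP_SYS_NICE may request XE_EXEC_QUEUE_PRIORITY_HIGH;
 * everyone else is capped at XE_EXEC_QUEUE_PRIORITY_NORMAL.
 */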
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

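/*
 * User extensions arrive as a singly linked chain of drm_xe_user_extension
 * structures in userspace memory; the chain is walked recursively via
 * ext.next_extension and is bounded to MAX_USER_EXTENSIONS entries.
 */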
#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

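/*
 * The instance array from userspace is laid out placement-major: entry
 * n = j * width + i holds placement j for lane i. All entries must target the
 * same GT and engine class, and for parallel queues (width > 1) each lane's
 * placement mask must be the previous lane's mask shifted left by one, i.e.
 * the engines must be logically contiguous.
 */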
static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_gt *gt;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

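		/*
		 * One bind queue is created per tile: the tile 0 queue is
		 * returned to userspace, and the queues for the remaining
		 * tiles are linked to it as children via multi_gt_link.
		 */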
		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;
			u32 flags = EXEC_QUEUE_FLAG_VM;

			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, 0,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

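/*
 * Number of jobs still in flight on the first LRC: the next sequence number
 * to be assigned minus the last sequence number the hardware has signaled,
 * minus one.
 */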
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec
 * queue from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save the run ticks
 * calculated using the delta from the last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u32 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate for that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

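/*
 * Access to the last fence is serialized either by the VM lock (for bind/VM
 * queues) or by the VM's dma-resv together with the hw engine group's mode
 * semaphore (for regular exec queues).
 */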
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when
 * closing the engine, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}