// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

enum xe_exec_queue_sched_prop {
        XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
        XE_EXEC_QUEUE_TIMESLICE = 1,
        XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
        XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                                      u64 extensions, int ext_number);

static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
        if (q->vm)
                xe_vm_put(q->vm);
        kfree(q);
}

static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
                                                   struct xe_vm *vm,
                                                   u32 logical_mask,
                                                   u16 width, struct xe_hw_engine *hwe,
                                                   u32 flags, u64 extensions)
{
        struct xe_exec_queue *q;
        struct xe_gt *gt = hwe->gt;
        int err;

        /* only kernel queues can be permanent */
        XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

        q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
        if (!q)
                return ERR_PTR(-ENOMEM);

        kref_init(&q->refcount);
        q->flags = flags;
        q->hwe = hwe;
        q->gt = gt;
        q->class = hwe->class;
        q->width = width;
        q->logical_mask = logical_mask;
        q->fence_irq = &gt->fence_irq[hwe->class];
        q->ring_ops = gt->ring_ops[hwe->class];
        q->ops = gt->exec_queue_ops;
        INIT_LIST_HEAD(&q->compute.link);
        INIT_LIST_HEAD(&q->multi_gt_link);

        q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
        q->sched_props.preempt_timeout_us =
                                hwe->eclass->sched_props.preempt_timeout_us;
        q->sched_props.job_timeout_ms =
                                hwe->eclass->sched_props.job_timeout_ms;
        if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
            q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
        else
                q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

        if (vm)
                q->vm = xe_vm_get(vm);

        if (extensions) {
                /*
                 * May set q->usm, so it must come before xe_lrc_init();
                 * may overwrite q->sched_props, so it must come before
                 * q->ops->init().
                 */
                err = exec_queue_user_extensions(xe, q, extensions, 0);
                if (err) {
                        __xe_exec_queue_free(q);
                        return ERR_PTR(err);
                }
        }

        if (xe_exec_queue_is_parallel(q)) {
                q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
                q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
        }

        return q;
}

static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
        struct xe_device *xe = gt_to_xe(q->gt);
        int i, err;

        for (i = 0; i < q->width; ++i) {
                err = xe_lrc_init(q->lrc + i, q->hwe, q, q->vm, SZ_16K);
                if (err)
                        goto err_lrc;
        }

        err = q->ops->init(q);
        if (err)
                goto err_lrc;

        /*
         * Normally the user vm holds an rpm ref to keep the device
         * awake, and the context holds a ref on that vm. However, for
         * some engines we use the kernel's migrate vm underneath, which
         * offers no such rpm ref, or we have no vm at all. Make sure we
         * keep a ref here, so we can perform GuC CT actions when needed.
         * The caller is expected to have already grabbed the rpm ref
         * outside any sensitive locks.
         */
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                xe_pm_runtime_get_noresume(xe);

        return 0;

err_lrc:
        for (i = i - 1; i >= 0; --i)
                xe_lrc_finish(q->lrc + i);
        return err;
}

struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
                                           u32 logical_mask, u16 width,
                                           struct xe_hw_engine *hwe, u32 flags,
                                           u64 extensions)
{
        struct xe_exec_queue *q;
        int err;

        q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
                                  extensions);
        if (IS_ERR(q))
                return q;

        if (vm) {
                err = xe_vm_lock(vm, true);
                if (err)
                        goto err_post_alloc;
        }

        err = __xe_exec_queue_init(q);
        if (vm)
                xe_vm_unlock(vm);
        if (err)
                goto err_post_alloc;

        return q;

err_post_alloc:
        __xe_exec_queue_free(q);
        return ERR_PTR(err);
}

struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
                                                 struct xe_vm *vm,
                                                 enum xe_engine_class class, u32 flags)
{
        struct xe_hw_engine *hwe, *hwe0 = NULL;
        enum xe_hw_engine_id id;
        u32 logical_mask = 0;

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;

                if (hwe->class == class) {
                        logical_mask |= BIT(hwe->logical_instance);
                        if (!hwe0)
                                hwe0 = hwe;
                }
        }

        if (!logical_mask)
                return ERR_PTR(-ENODEV);

        return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, 0);
}
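
/*
 * Example (illustrative sketch, not part of the driver): a kernel-internal
 * user that wants a copy-class queue on a given GT could call the helper
 * above roughly like this. The flag combination is an assumption of this
 * sketch, not a statement about any existing caller.
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_create_class(xe, gt, NULL, XE_ENGINE_CLASS_COPY,
 *				       EXEC_QUEUE_FLAG_KERNEL |
 *				       EXEC_QUEUE_FLAG_PERMANENT);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */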

void xe_exec_queue_destroy(struct kref *ref)
{
        struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
        struct xe_exec_queue *eq, *next;

        xe_exec_queue_last_fence_put_unlocked(q);
        if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
                list_for_each_entry_safe(eq, next, &q->multi_gt_list,
                                         multi_gt_link)
                        xe_exec_queue_put(eq);
        }

        q->ops->fini(q);
}

void xe_exec_queue_fini(struct xe_exec_queue *q)
{
        int i;

        for (i = 0; i < q->width; ++i)
                xe_lrc_finish(q->lrc + i);
        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM || !q->vm))
                xe_pm_runtime_put(gt_to_xe(q->gt));
        __xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
        switch (q->class) {
        case XE_ENGINE_CLASS_RENDER:
                snprintf(q->name, sizeof(q->name), "rcs%d", instance);
                break;
        case XE_ENGINE_CLASS_VIDEO_DECODE:
                snprintf(q->name, sizeof(q->name), "vcs%d", instance);
                break;
        case XE_ENGINE_CLASS_VIDEO_ENHANCE:
                snprintf(q->name, sizeof(q->name), "vecs%d", instance);
                break;
        case XE_ENGINE_CLASS_COPY:
                snprintf(q->name, sizeof(q->name), "bcs%d", instance);
                break;
        case XE_ENGINE_CLASS_COMPUTE:
                snprintf(q->name, sizeof(q->name), "ccs%d", instance);
                break;
        case XE_ENGINE_CLASS_OTHER:
                snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
                break;
        default:
                XE_WARN_ON(q->class);
        }
}

struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
        struct xe_exec_queue *q;

        mutex_lock(&xef->exec_queue.lock);
        q = xa_load(&xef->exec_queue.xa, id);
        if (q)
                xe_exec_queue_get(q);
        mutex_unlock(&xef->exec_queue.lock);

        return q;
}
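
/*
 * Example (illustrative sketch): the reference taken by the lookup above must
 * be balanced with xe_exec_queue_put() once the caller is done, as the ioctl
 * handlers below do:
 *
 *	struct xe_exec_queue *q;
 *
 *	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
 *	if (!q)
 *		return -ENOENT;
 *
 *	... use q ...
 *
 *	xe_exec_queue_put(q);
 */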

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
        return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
                                       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
                                   u64 value)
{
        if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
                return -EINVAL;

        if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
                return -EPERM;

        q->sched_props.priority = value;
        return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
        return true;
#else
        return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
                              enum xe_exec_queue_sched_prop prop,
                              u32 *min, u32 *max)
{
        switch (prop) {
        case XE_EXEC_QUEUE_JOB_TIMEOUT:
                *min = eclass->sched_props.job_timeout_min;
                *max = eclass->sched_props.job_timeout_max;
                break;
        case XE_EXEC_QUEUE_TIMESLICE:
                *min = eclass->sched_props.timeslice_min;
                *max = eclass->sched_props.timeslice_max;
                break;
        case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
                *min = eclass->sched_props.preempt_timeout_min;
                *max = eclass->sched_props.preempt_timeout_max;
                break;
        default:
                break;
        }
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
        if (capable(CAP_SYS_NICE)) {
                switch (prop) {
                case XE_EXEC_QUEUE_JOB_TIMEOUT:
                        *min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
                        *max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
                        break;
                case XE_EXEC_QUEUE_TIMESLICE:
                        *min = XE_HW_ENGINE_TIMESLICE_MIN;
                        *max = XE_HW_ENGINE_TIMESLICE_MAX;
                        break;
                case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
                        *min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
                        *max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
                        break;
                default:
                        break;
                }
        }
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
                                    u64 value)
{
        u32 min = 0, max = 0;

        xe_exec_queue_get_prop_minmax(q->hwe->eclass,
                                      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

        if (xe_exec_queue_enforce_schedule_limit() &&
            !xe_hw_engine_timeout_in_range(value, min, max))
                return -EINVAL;

        q->sched_props.timeslice_us = value;
        return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
                                             struct xe_exec_queue *q,
                                             u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
        [DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
                                            struct xe_exec_queue *q,
                                            u64 extension)
{
        u64 __user *address = u64_to_user_ptr(extension);
        struct drm_xe_ext_set_property ext;
        int err;
        u32 idx;

        err = __copy_from_user(&ext, address, sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, ext.property >=
                         ARRAY_SIZE(exec_queue_set_property_funcs)) ||
            XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
                         ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
                return -EINVAL;

        idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
        if (!exec_queue_set_property_funcs[idx])
                return -EINVAL;

        return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
                                               struct xe_exec_queue *q,
                                               u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
        [DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS     16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
                                      u64 extensions, int ext_number)
{
        u64 __user *address = u64_to_user_ptr(extensions);
        struct drm_xe_user_extension ext;
        int err;
        u32 idx;

        if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
                return -E2BIG;

        err = __copy_from_user(&ext, address, sizeof(ext));
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, ext.pad) ||
            XE_IOCTL_DBG(xe, ext.name >=
                         ARRAY_SIZE(exec_queue_user_extension_funcs)))
                return -EINVAL;

        idx = array_index_nospec(ext.name,
                                 ARRAY_SIZE(exec_queue_user_extension_funcs));
        err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
        if (XE_IOCTL_DBG(xe, err))
                return err;

        if (ext.next_extension)
                return exec_queue_user_extensions(xe, q, ext.next_extension,
                                                  ++ext_number);

        return 0;
}
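
/*
 * Example (illustrative sketch of the userspace side, assuming the uAPI
 * struct layout from xe_drm.h): extensions form a singly linked list that
 * the function above walks. A single SET_PROPERTY node adjusting the
 * timeslice could be built like this:
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
 *		.base.next_extension = 0,
 *		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
 *		.value = 1000,
 *	};
 *
 * and passed via the extensions field of drm_xe_exec_queue_create. Further
 * nodes chain through base.next_extension; the kernel caps the walk at
 * MAX_USER_EXTENSIONS entries. The value of 1000 here is a hypothetical
 * timeslice in microseconds.
 */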

static const enum xe_engine_class user_to_xe_engine_class[] = {
        [DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
        [DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
        [DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
        [DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
        [DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
               struct drm_xe_engine_class_instance eci)
{
        u32 idx;

        if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
                return NULL;

        if (eci.gt_id >= xe->info.gt_count)
                return NULL;

        idx = array_index_nospec(eci.engine_class,
                                 ARRAY_SIZE(user_to_xe_engine_class));

        return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
                               user_to_xe_engine_class[idx],
                               eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
                                        struct drm_xe_engine_class_instance *eci,
                                        u16 width, u16 num_placements)
{
        struct xe_hw_engine *hwe;
        enum xe_hw_engine_id id;
        u32 logical_mask = 0;

        if (XE_IOCTL_DBG(xe, width != 1))
                return 0;
        if (XE_IOCTL_DBG(xe, num_placements != 1))
                return 0;
        if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
                return 0;

        eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

        for_each_hw_engine(hwe, gt, id) {
                if (xe_hw_engine_is_reserved(hwe))
                        continue;

                if (hwe->class ==
                    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
                        logical_mask |= BIT(hwe->logical_instance);
        }

        return logical_mask;
}

static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
                                      struct drm_xe_engine_class_instance *eci,
                                      u16 width, u16 num_placements)
{
        int len = width * num_placements;
        int i, j, n;
        u16 class;
        u16 gt_id;
        u32 return_mask = 0, prev_mask;

        if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
                         len > 1))
                return 0;

        for (i = 0; i < width; ++i) {
                u32 current_mask = 0;

                for (j = 0; j < num_placements; ++j) {
                        struct xe_hw_engine *hwe;

                        n = j * width + i;

                        hwe = find_hw_engine(xe, eci[n]);
                        if (XE_IOCTL_DBG(xe, !hwe))
                                return 0;

                        if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
                                return 0;

                        if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
                            XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
                                return 0;

                        class = eci[n].engine_class;
                        gt_id = eci[n].gt_id;

                        if (width == 1 || !i)
                                return_mask |= BIT(eci[n].engine_instance);
                        current_mask |= BIT(eci[n].engine_instance);
                }

                /* Parallel submissions must be logically contiguous */
                if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
                        return 0;

                prev_mask = current_mask;
        }

        return return_mask;
}
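
/*
 * Worked example for the contiguity rule above: a parallel queue with
 * width == 2 and num_placements == 1 on engine instances {2, 3} yields
 * current_mask == BIT(2) on the first pass and BIT(3) == BIT(2) << 1 on the
 * second, so the placement is accepted and the returned mask is BIT(2).
 * Instances {2, 4} would fail the prev_mask << 1 check and return 0.
 */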

int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_create *args = data;
        struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
        struct drm_xe_engine_class_instance __user *user_eci =
                u64_to_user_ptr(args->instances);
        struct xe_hw_engine *hwe;
        struct xe_vm *vm, *migrate_vm;
        struct xe_gt *gt;
        struct xe_exec_queue *q = NULL;
        u32 logical_mask;
        u32 id;
        u32 len;
        int err;

        if (XE_IOCTL_DBG(xe, args->flags) ||
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        len = args->width * args->num_placements;
        if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
                return -EINVAL;

        err = __copy_from_user(eci, user_eci,
                               sizeof(struct drm_xe_engine_class_instance) *
                               len);
        if (XE_IOCTL_DBG(xe, err))
                return -EFAULT;

        if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
                return -EINVAL;

        if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
                for_each_gt(gt, xe, id) {
                        struct xe_exec_queue *new;
                        u32 flags;

                        if (xe_gt_is_media_type(gt))
                                continue;

                        eci[0].gt_id = gt->info.id;
                        logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
                                                                    args->width,
                                                                    args->num_placements);
                        if (XE_IOCTL_DBG(xe, !logical_mask))
                                return -EINVAL;

                        hwe = find_hw_engine(xe, eci[0]);
                        if (XE_IOCTL_DBG(xe, !hwe))
                                return -EINVAL;

                        /* The migration vm doesn't hold rpm ref */
                        xe_pm_runtime_get_noresume(xe);

                        flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);

                        migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
                        new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
                                                   args->width, hwe, flags,
                                                   args->extensions);

                        xe_pm_runtime_put(xe); /* now held by engine */

                        xe_vm_put(migrate_vm);
                        if (IS_ERR(new)) {
                                err = PTR_ERR(new);
                                if (q)
                                        goto put_exec_queue;
                                return err;
                        }
                        if (id == 0)
                                q = new;
                        else
                                list_add_tail(&new->multi_gt_list,
                                              &q->multi_gt_link);
                }
        } else {
                gt = xe_device_get_gt(xe, eci[0].gt_id);
                logical_mask = calc_validate_logical_mask(xe, gt, eci,
                                                          args->width,
                                                          args->num_placements);
                if (XE_IOCTL_DBG(xe, !logical_mask))
                        return -EINVAL;

                hwe = find_hw_engine(xe, eci[0]);
                if (XE_IOCTL_DBG(xe, !hwe))
                        return -EINVAL;

                vm = xe_vm_lookup(xef, args->vm_id);
                if (XE_IOCTL_DBG(xe, !vm))
                        return -ENOENT;

                err = down_read_interruptible(&vm->lock);
                if (err) {
                        xe_vm_put(vm);
                        return err;
                }

                if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
                        up_read(&vm->lock);
                        xe_vm_put(vm);
                        return -ENOENT;
                }

                q = xe_exec_queue_create(xe, vm, logical_mask,
                                         args->width, hwe, 0,
                                         args->extensions);
                up_read(&vm->lock);
                xe_vm_put(vm);
                if (IS_ERR(q))
                        return PTR_ERR(q);

                if (xe_vm_in_preempt_fence_mode(vm)) {
                        q->compute.context = dma_fence_context_alloc(1);
                        spin_lock_init(&q->compute.lock);

                        err = xe_vm_add_compute_exec_queue(vm, q);
                        if (XE_IOCTL_DBG(xe, err))
                                goto put_exec_queue;
                }
        }

        mutex_lock(&xef->exec_queue.lock);
        err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
        mutex_unlock(&xef->exec_queue.lock);
        if (err)
                goto kill_exec_queue;

        args->exec_queue_id = id;

        return 0;

kill_exec_queue:
        xe_exec_queue_kill(q);
put_exec_queue:
        xe_exec_queue_put(q);
        return err;
}
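
/*
 * Example (illustrative sketch of the userspace side, assuming the uAPI
 * structures in xe_drm.h and the usual DRM ioctl naming): creating a single
 * render queue bound to an existing VM could look roughly like this, with
 * width * num_placements == 1 instance entry:
 *
 *	struct drm_xe_engine_class_instance eci = {
 *		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
 *		.engine_instance = 0,
 *		.gt_id = 0,
 *	};
 *	struct drm_xe_exec_queue_create create = {
 *		.width = 1,
 *		.num_placements = 1,
 *		.vm_id = vm_id,
 *		.instances = (uintptr_t)&eci,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
 *
 * On success, create.exec_queue_id holds the new queue id for later exec,
 * get-property and destroy calls.
 */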

int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_get_property *args = data;
        struct xe_exec_queue *q;
        int ret;

        if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        q = xe_exec_queue_lookup(xef, args->exec_queue_id);
        if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;

        switch (args->property) {
        case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
                args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }

        xe_exec_queue_put(q);

        return ret;
}
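
/*
 * Example (illustrative sketch of the userspace side, assuming the uAPI in
 * xe_drm.h and the usual DRM ioctl naming): querying whether a queue has
 * been banned, with exec_queue_id being an id returned by the create ioctl:
 *
 *	struct drm_xe_exec_queue_get_property prop = {
 *		.exec_queue_id = exec_queue_id,
 *		.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
 *
 * after which prop.value is non-zero if the queue carries
 * EXEC_QUEUE_FLAG_BANNED.
 */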

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
        return q->vm && xe_vm_in_lr_mode(q->vm) &&
                !(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
        return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
        struct xe_lrc *lrc = q->lrc;
        s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

        return xe_exec_queue_num_job_inflight(q) >= max_job;
}
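
/*
 * Worked example for the inflight accounting above: if xe_lrc_seqno()
 * reports 10 as the last completed seqno and fence_ctx.next_seqno is 14,
 * then seqnos 11, 12 and 13 are still in flight: 14 - 10 - 1 == 3. The ring
 * is treated as full once that count reaches ring.size / MAX_JOB_SIZE_BYTES.
 */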

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically this is vm->resv, but user-created timeline locks use the
 * migrate vm and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
        if (xe_exec_queue_is_parallel(q)) {
                int i;

                for (i = 0; i < q->width; ++i) {
                        if (xe_lrc_seqno(&q->lrc[i]) !=
                            q->lrc[i].fence_ctx.next_seqno - 1)
                                return false;
                }

                return true;
        }

        return xe_lrc_seqno(&q->lrc[0]) ==
                q->lrc[0].fence_ctx.next_seqno - 1;
}

void xe_exec_queue_kill(struct xe_exec_queue *q)
{
        struct xe_exec_queue *eq = q, *next;

        list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
                                 multi_gt_link) {
                q->ops->kill(eq);
                xe_vm_remove_compute_exec_queue(q->vm, eq);
        }

        q->ops->kill(q);
        xe_vm_remove_compute_exec_queue(q->vm, q);
}

int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct xe_device *xe = to_xe_device(dev);
        struct xe_file *xef = to_xe_file(file);
        struct drm_xe_exec_queue_destroy *args = data;
        struct xe_exec_queue *q;

        if (XE_IOCTL_DBG(xe, args->pad) ||
            XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
                return -EINVAL;

        mutex_lock(&xef->exec_queue.lock);
        q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
        mutex_unlock(&xef->exec_queue.lock);
        if (XE_IOCTL_DBG(xe, !q))
                return -ENOENT;

        xe_exec_queue_kill(q);

        trace_xe_exec_queue_close(q);
        xe_exec_queue_put(q);

        return 0;
}
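
/*
 * Example (illustrative sketch of the userspace side, assuming the uAPI in
 * xe_drm.h and the usual DRM ioctl naming): tearing down a queue created
 * earlier, with exec_queue_id being the id returned by the create ioctl:
 *
 *	struct drm_xe_exec_queue_destroy destroy = {
 *		.exec_queue_id = exec_queue_id,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
 *
 * This drops the file's reference; the queue is killed and freed once all
 * remaining references are gone.
 */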

static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
                                                    struct xe_vm *vm)
{
        if (q->flags & EXEC_QUEUE_FLAG_VM)
                lockdep_assert_held(&vm->lock);
        else
                xe_vm_assert_held(vm);
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
        if (q->last_fence) {
                dma_fence_put(q->last_fence);
                q->last_fence = NULL;
        }
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
                                               struct xe_vm *vm)
{
        struct dma_fence *fence;

        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        if (q->last_fence &&
            test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
                xe_exec_queue_last_fence_put(q, vm);

        fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
        dma_fence_get(fence);
        return fence;
}
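
/*
 * Example (illustrative sketch): with the locking required by the lockdep
 * assert above held, a bind or exec path typically takes the last fence as a
 * dependency for its next job and drops the reference when done:
 *
 *	struct dma_fence *fence = xe_exec_queue_last_fence_get(q, vm);
 *
 *	... add fence as a job dependency or wait on it ...
 *
 *	dma_fence_put(fence);
 *
 * Because a signaled last fence is replaced by the dma-fence stub above,
 * callers always receive a valid fence pointer and must always drop their
 * reference.
 */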

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference on @fence; when the
 * engine is closed, xe_exec_queue_last_fence_put() should be called.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
                                  struct dma_fence *fence)
{
        xe_exec_queue_last_fence_lockdep_assert(q, vm);

        xe_exec_queue_last_fence_put(q, vm);
        q->last_fence = dma_fence_get(fence);
}
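
/*
 * Example (illustrative sketch): under the same locking rules as above, a
 * submission path would record the fence of the job it just queued so that
 * later binds or execs on this queue can depend on it:
 *
 *	xe_exec_queue_last_fence_set(q, vm, fence);
 *
 * where fence is the job's finished fence (a hypothetical local in this
 * sketch). The reference taken here is dropped by a later
 * xe_exec_queue_last_fence_put() or by the next
 * xe_exec_queue_last_fence_set() call.
 */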