// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

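/*
 * Each engine exposed to userspace gets its own sysfs directory. The
 * kobject backing that directory is bundled with the engine pointer so
 * that the attribute callbacks, which only receive the kobject, can
 * recover the engine via container_of().
 */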
struct kobj_engine {
        struct kobject base;
        struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
        return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

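/*
 * Tables mapping capability bits to the names shown in sysfs, indexed
 * by bit position. The uAPI defines each capability as a bitmask, so
 * ilog2() turns the mask into an array index.
 */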
static const char * const vcs_caps[] = {
        [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
        /* Trim off the trailing space and replace with a newline */
        if (len > PAGE_SIZE)
                len = PAGE_SIZE;
        if (len > 0)
                buf[len - 1] = '\n';

        return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
            unsigned long caps, char *buf, bool show_unknown)
{
        const char * const *repr;
        int count, n;
        ssize_t len;

        switch (engine->class) {
        case VIDEO_DECODE_CLASS:
                repr = vcs_caps;
                count = ARRAY_SIZE(vcs_caps);
                break;

        case VIDEO_ENHANCEMENT_CLASS:
                repr = vecs_caps;
                count = ARRAY_SIZE(vecs_caps);
                break;

        default:
                repr = NULL;
                count = 0;
                break;
        }
        GEM_BUG_ON(count > BITS_PER_LONG);

        len = 0;
        for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
                if (n >= count || !repr[n]) {
                        if (GEM_WARN_ON(show_unknown))
                                len += sysfs_emit_at(buf, len, "[%x] ", n);
                } else {
                        len += sysfs_emit_at(buf, len, "%s ", repr[n]);
                }
                if (GEM_WARN_ON(len >= PAGE_SIZE))
                        break;
        }
        return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
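
/*
 * Example (illustrative path and output; engine names and capability
 * sets vary by platform):
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *
 * known_capabilities reports every capability name the driver could
 * report for the engine's class, whether or not this engine has it.
 */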

static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
               const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration, clamped;
        int err;

        /*
         * When waiting for a request, if it is currently being executed
         * on the GPU, we busywait for a short while before sleeping. The
         * premise is that most requests are short, and if one is already
         * executing then there is a good chance that it will complete
         * before we can set up the interrupt handler and go to sleep.
         * We try to offset the cost of going to sleep by first spinning
         * on the request -- if it completes in less time than it would
         * take to go to sleep, process the interrupt and return back to
         * the client, then we have saved the client some latency, albeit
         * at the cost of spinning on an expensive CPU core.
         *
         * While we try to avoid waiting at all for a request that is
         * unlikely to complete, deciding how long it is worth spinning
         * for is an arbitrary decision: trading off power vs latency.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
        if (duration != clamped)
                return -EINVAL;

        WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

        return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
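
/*
 * Example: writing 0 disables busywaiting entirely, so waiters always
 * sleep on the interrupt (illustrative path; engine names vary):
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 */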

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration, clamped;
        int err;

        /*
         * Execlists uses a scheduling quantum (a timeslice) to alternate
         * execution between ready-to-run contexts of equal priority. This
         * ensures that all users (though only if they are of equal
         * importance) have the opportunity to run and prevents livelocks
         * where contexts may have implicit ordering due to userspace
         * semaphores.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        clamped = intel_clamp_timeslice_duration_ms(engine, duration);
        if (duration != clamped)
                return -EINVAL;

        WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

        if (execlists_active(&engine->execlists))
                set_timer_ms(&engine->execlists.timer, duration);

        return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
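
/*
 * Example: a longer timeslice trades scheduling latency between
 * equal-priority contexts for fewer preemptions (illustrative path):
 *
 *   $ echo 10 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */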

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
           const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration, clamped;
        int err;

        /*
         * Allowing ourselves to sleep before a GPU reset after disabling
         * submission, even for a few milliseconds, gives an innocent
         * context the opportunity to clear the GPU before the reset
         * occurs. However, how long to sleep depends on the typical
         * non-preemptible duration (a similar problem to determining the
         * ideal preempt-reset timeout or even the heartbeat interval).
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        clamped = intel_clamp_stop_timeout_ms(engine, duration);
        if (duration != clamped)
                return -EINVAL;

        WRITE_ONCE(engine->props.stop_timeout_ms, duration);
        return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
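
/*
 * Example: give an innocent context a little longer to flush out after
 * submission is disabled, before the reset lands (illustrative path):
 *
 *   $ echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */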

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
                      const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long timeout, clamped;
        int err;

        /*
         * After initialising a preemption request, we give the current
         * resident a small amount of time to vacate the GPU. The preemption
         * request is for a higher priority context and should be immediate to
         * maintain high quality of service (and avoid priority inversion).
         * However, the preemption granularity of the GPU can be quite coarse
         * and so we need a compromise.
         */

        err = kstrtoull(buf, 0, &timeout);
        if (err)
                return err;

        clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
        if (timeout != clamped)
                return -EINVAL;

        WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

        if (READ_ONCE(engine->execlists.pending[0]))
                set_timer_ms(&engine->execlists.preempt, timeout);

        return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
                     char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
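
/*
 * Example: if the resident context does not yield within this many
 * milliseconds of a preemption request, the engine is declared hung
 * and reset (illustrative path):
 *
 *   $ echo 640 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */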

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long delay, clamped;
        int err;

        /*
         * We monitor the health of the system via periodic heartbeat pulses.
         * The pulses also provide the opportunity to perform garbage
         * collection. However, we interpret an incomplete pulse (a missed
         * heartbeat) as an indication that the system is no longer responsive,
         * i.e. hung, and perform an engine or full GPU reset. Given that the
         * preemption granularity can be very coarse on a system, the optimal
         * value for any workload is unknowable!
         */

        err = kstrtoull(buf, 0, &delay);
        if (err)
                return err;

        clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
        if (delay != clamped)
                return -EINVAL;

        err = intel_engine_set_heartbeat(engine, delay);
        if (err)
                return err;

        return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
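
/*
 * Example: writing 0 stops the heartbeat pulses, which also disables
 * heartbeat-based hang detection for the engine (illustrative path):
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */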

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

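/*
 * The kobject owns the struct kobj_engine allocation: once registered,
 * the wrapper is freed here when the last reference is dropped.
 */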
static void kobj_engine_release(struct kobject *kobj)
{
        kfree(kobj);
}

static const struct kobj_type kobj_engine_type = {
        .release = kobj_engine_release,
        .sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return NULL;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = engine;

        if (kobject_add(&ke->base, dir, "%s", engine->name)) {
                kobject_put(&ke->base);
                return NULL;
        }

        /* xfer ownership to sysfs tree */
        return &ke->base;
}

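/*
 * Populate a hidden ".defaults" subdirectory under the engine with
 * read-only copies of each tunable's factory value, so userspace can
 * restore an engine to its original configuration.
 */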
static void add_defaults(struct kobj_engine *parent)
{
        static const struct attribute * const files[] = {
                &max_spin_def.attr,
                &stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_def.attr,
#endif
                NULL
        };
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = parent->engine;

        if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
                kobject_put(&ke->base);
                return;
        }

        if (sysfs_create_files(&ke->base, files))
                return;

        if (intel_engine_has_timeslices(ke->engine) &&
            sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
                return;

        if (intel_engine_has_preempt_reset(ke->engine) &&
            sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
                return;
}

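/*
 * Resulting layout, sketched (engine set and names vary by platform;
 * timeslice/preempt files appear only where the engine supports them):
 *
 *   /sys/class/drm/card0/engine/
 *       rcs0/
 *           name  class  instance  mmio_base
 *           capabilities  known_capabilities
 *           max_busywait_duration_ns  stop_timeout_ms  ...
 *           .defaults/          (read-only default values)
 *       ...
 */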
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
        static const struct attribute * const files[] = {
                &name_attr.attr,
                &class_attr.attr,
                &inst_attr.attr,
                &mmio_attr.attr,
                &caps_attr.attr,
                &all_caps_attr.attr,
                &max_spin_attr.attr,
                &stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_attr.attr,
#endif
                NULL
        };

        struct device *kdev = i915->drm.primary->kdev;
        struct intel_engine_cs *engine;
        struct kobject *dir;

        dir = kobject_create_and_add("engine", &kdev->kobj);
        if (!dir)
                return;

        for_each_uabi_engine(engine, i915) {
                struct kobject *kobj;

                kobj = kobj_engine(dir, engine);
                if (!kobj)
                        goto err_engine;

                if (sysfs_create_files(kobj, files))
                        goto err_object;

                if (intel_engine_has_timeslices(engine) &&
                    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
                        goto err_engine;

                if (intel_engine_has_preempt_reset(engine) &&
                    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
                        goto err_engine;

                add_defaults(container_of(kobj, struct kobj_engine, base));

                if (0) {
err_object:
                        kobject_put(kobj);
err_engine:
                        dev_err(kdev, "Failed to add sysfs engine '%s'\n",
                                engine->name);
                        break;
                }
        }
}