// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"
struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}
static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
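
/*
 * Each attribute above appears as a read-only file under the per-engine
 * sysfs directory created in intel_engines_add_sysfs() below, e.g.
 * (card index and engine name are illustrative):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/name
 *   rcs0
 *   $ cat /sys/class/drm/card0/engine/rcs0/class
 *   0
 *   $ cat /sys/class/drm/card0/engine/rcs0/instance
 *   0
 *
 * class/instance report the uabi values, matching the engine addressing
 * used by the I915_CONTEXT_PARAM_ENGINES uapi.
 */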
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};
static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE - 2)
		len = PAGE_SIZE - 2;
	if (len)
		buf[len - 1] = '\n';

	return len;
}
static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += sysfs_emit_at(buf, len, "[%x] ", n);
		} else {
			len += sysfs_emit_at(buf, len, "%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}
static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
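
/*
 * Example (illustrative engine and values; the caps reported depend on
 * the platform):
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *   $ cat /sys/class/drm/card0/engine/vcs0/known_capabilities
 *   hevc sfc
 *
 * capabilities shows what this engine instance supports;
 * known_capabilities lists every name the driver knows for the class,
 * letting userspace distinguish "not supported" from "not reported".
 */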
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if one is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would
	 * take to go to sleep, process the interrupt and return back to
	 * the client, then we have saved the client some latency, albeit
	 * at the cost of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}
static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
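
/*
 * Example (illustrative): disabling the busywait entirely to trade
 * latency for power. Writes outside the range accepted by the clamp
 * helper fail with -EINVAL.
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *   $ cat /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *   0
 */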
static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only those of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}
static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
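
/*
 * Example (illustrative): shortening the quantum for latency-sensitive
 * workload mixes. The store also reprograms an already-running timeslice
 * timer, so the new value takes effect without waiting for the current
 * slice to expire.
 *
 *   $ echo 1 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */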
static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);

	return count;
}
static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
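
/*
 * Example (illustrative): giving innocent contexts a little longer to
 * drain off the GPU before a reset is forced.
 *
 *   $ echo 200 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */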
static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);
static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}
static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
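
/*
 * Example (illustrative): this attribute is only exposed on engines that
 * support preempt-to-reset (see intel_engines_add_sysfs() below), and a
 * store while a preemption is pending also reprograms the running timer.
 *
 *   $ echo 100 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */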
static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}
static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
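
/*
 * Example (illustrative): writing 0 asks the driver to stop sending
 * heartbeat pulses on this engine (via intel_engine_set_heartbeat()),
 * at the cost of losing hang detection there.
 *
 *   $ echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */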
static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);
static void kobj_engine_release(struct kobject *kobj)
{
	/* base is the first member, so this frees the whole kobj_engine */
	kfree(kobj);
}

static const struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};
static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}
static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute * const files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}
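
/*
 * The result is a hidden ".defaults" subdirectory per engine holding the
 * read-only factory values, so userspace can restore an attribute after
 * experimenting, e.g. (paths illustrative):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/.defaults/heartbeat_interval_ms \
 *       > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */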
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute * const files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
		}
	}
}