/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

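/*
 * i915_capabilities dumps the static device info, the driver caps and the
 * currently loaded module parameters. A sketch of how this is consumed
 * (illustrative only; assumes the usual debugfs mount point and that the
 * node is registered under this name):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_capabilities
 */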
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);
	const char *msg;

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	msg = "n/a";
#ifdef CONFIG_INTEL_IOMMU
	msg = enableddisabled(intel_iommu_gfx_mapped);
#endif
	seq_printf(m, "iommu: %s\n", msg);

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");

		/* drop the trailing ", " by truncating the string */
		buf[x - 2] = '\0';

		return buf;
	}
}

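/*
 * Pretty-print a single GEM object for debugfs. The leading flag characters
 * come from the helpers above: tiling (' '/'X'/'Y'), 'g' if the object has
 * outstanding GGTT mmap faults, 'M' if it is kmapped. Each VMA bound into a
 * GTT is then listed with its offset, size, page sizes and, for GGTT VMAs,
 * the view type (normal/partial/rotated/remapped) and fence register.
 */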
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total;
	u64 active, inactive;
	u64 unbound;
	u64 closed;
};

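/*
 * per_file_stats() below is an idr_for_each() callback: it accumulates
 * object sizes into a file_stats bucket. With stats->vm set, it only counts
 * the VMA bound into that address space (walking the object's VMA rbtree);
 * otherwise it sums across all of the object's GGTT VMAs.
 */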
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

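/*
 * Walk the global context list and report per-client memory usage. The
 * contexts lock is dropped while each context is inspected (a reference
 * pins it); list_safe_reset_next() revalidates the list cursor once the
 * lock is retaken. Kernel-owned context state is folded into "[k]contexts".
 */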
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			idr_for_each(&file->object_idr, per_file_stats, &stats);

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);

	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}

static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

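/*
 * Dump the raw interrupt registers (IER/IIR/IMR and friends) for the
 * platform at hand. Each branch below mirrors how the corresponding
 * interrupt handler partitions the hardware: Cherryview, gen11+, gen8+,
 * Valleyview, pre-PCH-split (gen2-4) and Ironlake+ PCH-split layouts.
 */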
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

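/*
 * GPU error state handling. The i915_gpu_info file captures a fresh
 * snapshot of the GPU state on every open, whereas i915_error_state
 * exposes the snapshot recorded at hang time; writing to the latter
 * clears the stored error state.
 */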
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

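/*
 * i915_frequency_info reports the RPS/turbo state. On gen6+ that means
 * decoding GEN6_RPNSWREQ (the requested ratio), RPSTAT1 (the actual
 * frequency, via intel_get_cagf()), and the up/down evaluation-interval
 * counters the hardware uses to decide frequency changes.
 */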
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				 GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return ret;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(&i915->uncore, reg),
			   intel_rc6_residency_us(&i915->gt.rc6, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

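/*
 * gen6_drpc_info decodes the RC6 ("render C-state") setup: which RC6
 * levels are enabled in GEN6_RC_CONTROL, the current RC state from
 * GEN6_GT_CORE_STATUS, gen9+ render/media power-gating status, and the
 * residency counters printed via print_rc6_res() above.
 */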
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

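/*
 * The ring/IA frequency table maps each GPU frequency step to the
 * effective CPU and ring frequencies pcode will pair with it, read back
 * through GEN6_PCODE_READ_MIN_FREQ_TABLE. Note that gen9+ big-core parts
 * track GT frequency in 16.667 MHz units rather than 50 MHz, hence the
 * GEN9_FREQ_SCALER conversions below.
 */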
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

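/*
 * Report the bit6 swizzling mode applied to X/Y-tiled surfaces, along
 * with the memory-controller registers (DCC, MAD_DIMM_*, TILECTL, ...)
 * that determine it. This matters on platforms where CPU access to
 * tiled buffers has to compensate for address swizzling.
 */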
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

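/*
 * i915_rps_boost_info summarises the RPS bookkeeping: current vs actual
 * frequency, the soft/hard frequency limits, outstanding waitboosts and,
 * while the GT is awake, the up/down evaluation-interval counters
 * expressed as a busyness percentage against the autotuning thresholds.
 */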
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			vlv_punit_get(dev_priv);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			vlv_punit_put(dev_priv);
			act_freq = (act_freq >> 8) & 0xff;
		} else {
			act_freq = intel_get_cagf(rps,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_gpu_freq(rps, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}

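/*
 * GuC/HuC microcontroller debug files. The *_load_status files dump the
 * firmware descriptor plus the hardware status register; the log files
 * expose the GuC log buffer and its relay channel to userspace.
 */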
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		seq_puts(m, "GuC log relay not created\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct intel_guc_client *client = guc->execbuf_client;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		   client->priority,
		   client->stage_id,
		   client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		   client->doorbell_id, client->doorbell_offset);
	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}

static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);

	return 0;
}

static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");

static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;
	struct intel_guc_log *log = &guc->log;

	if (!intel_guc_is_running(guc))
		return -ENODEV;

	file->private_data = log;

	return intel_guc_log_relay_open(log);
}

static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;
	int val;
	int ret;

	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
	if (ret < 0)
		return ret;

	/*
	 * Enable and start the guc log relay on value of 1.
	 * Flush log relay for any other value.
	 */
	if (val == 1)
		ret = intel_guc_log_relay_start(log);
	else
		intel_guc_log_relay_flush(log);

	return ret ?: cnt;
}

static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;

	intel_guc_log_relay_close(&guc->log);
	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};

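/*
 * PSR (Panel Self Refresh) debug files. Sink state is read over DPCD
 * (DP_PSR_STATUS); source state comes from the EDP_PSR{,2}_STATUS and
 * control registers of the transcoder driving the panel.
 */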
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
	if (ret == 1) {
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}

2097 static int i915_edp_psr_status(struct seq_file *m, void *data)
2099 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2100 struct i915_psr *psr = &dev_priv->psr;
2101 intel_wakeref_t wakeref;
2106 if (!HAS_PSR(dev_priv))
2109 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2111 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2114 if (!psr->sink_support)
2117 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2118 mutex_lock(&psr->lock);
2121 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2123 status = "disabled";
2124 seq_printf(m, "PSR mode: %s\n", status);
2126 if (!psr->enabled) {
2127 seq_printf(m, "PSR sink not reliable: %s\n",
2128 yesno(psr->sink_not_reliable));
2133 if (psr->psr2_enabled) {
2134 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
2135 enabled = val & EDP_PSR2_ENABLE;
2137 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
2138 enabled = val & EDP_PSR_ENABLE;
2140 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2141 enableddisabled(enabled), val);
2142 psr_source_status(dev_priv, m);
2143 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2144 psr->busy_frontbuffer_bits);
2147 * SKL+ Perf counter is reset to 0 every time DC state is entered
2149 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2150 val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
2151 val &= EDP_PSR_PERF_CNT_MASK;
2152 seq_printf(m, "Performance counter: %u\n", val);
2155 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2156 seq_printf(m, "Last attempted entry at: %lld\n",
2157 psr->last_entry_attempt);
2158 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2161 if (psr->psr2_enabled) {
2162 u32 su_frames_val[3];
2166 * Read all 3 registers beforehand to minimize the chance of crossing
2167 * a frame boundary between register reads
2169 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2170 val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
2172 su_frames_val[frame / 3] = val;
2175 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2177 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2180 su_blocks = su_frames_val[frame / 3] &
2181 PSR2_SU_STATUS_MASK(frame);
2182 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2183 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2188 mutex_unlock(&psr->lock);
2189 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
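/*
 * A worked sketch of the SU status unpacking above: each 32-bit
 * PSR2_SU_STATUS register packs the SU block counts of three
 * consecutive frames. The field layout assumed here (10-bit fields at
 * bit offset (frame % 3) * 10, mirroring what the
 * PSR2_SU_STATUS_SHIFT()/MASK() macros are expected to encode) is an
 * assumption; illustrative only.
 */
#if 0
static unsigned int su_blocks_for_frame(const u32 *su_frames_val, int frame)
{
	u32 reg = su_frames_val[frame / 3];	/* register holding this frame */
	unsigned int shift = (frame % 3) * 10;	/* assumed 10-bit fields */

	return (reg >> shift) & 0x3ff;
}
#endif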
2195 i915_edp_psr_debug_set(void *data, u64 val)
2197 struct drm_i915_private *dev_priv = data;
2198 intel_wakeref_t wakeref;
2201 if (!CAN_PSR(dev_priv))
2204 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2206 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2208 ret = intel_psr_debug_set(dev_priv, val);
2210 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2216 i915_edp_psr_debug_get(void *data, u64 *val)
2218 struct drm_i915_private *dev_priv = data;
2220 if (!CAN_PSR(dev_priv))
2223 *val = READ_ONCE(dev_priv->psr.debug);
2227 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2228 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2231 static int i915_energy_uJ(struct seq_file *m, void *data)
2233 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2234 unsigned long long power;
2235 intel_wakeref_t wakeref;
2238 if (INTEL_GEN(dev_priv) < 6)
2241 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2244 units = (power & 0x1f00) >> 8;
2245 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2246 power = I915_READ(MCH_SECP_NRG_STTS);
2248 power = (1000000 * power) >> units; /* convert to uJ */
2249 seq_printf(m, "%llu", power);
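/*
 * Worked example of the conversion above: bits 12:8 of
 * MSR_RAPL_POWER_UNIT give the energy status unit, meaning one counter
 * tick is 2^-units joules, so (1000000 * raw) >> units yields
 * microjoules. Illustrative sketch:
 */
#if 0
static unsigned long long rapl_counter_to_uJ(unsigned long long raw,
					     unsigned int units)
{
	/* units = (MSR_RAPL_POWER_UNIT & 0x1f00) >> 8, as read above */
	return (1000000 * raw) >> units;
}
/* e.g. with the common units == 14: rapl_counter_to_uJ(16384, 14) == 1000000, i.e. 1 J */
#endif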
2254 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2256 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2257 struct pci_dev *pdev = dev_priv->drm.pdev;
2259 if (!HAS_RUNTIME_PM(dev_priv))
2260 seq_puts(m, "Runtime power management not supported\n");
2262 seq_printf(m, "Runtime power status: %s\n",
2263 enableddisabled(!dev_priv->power_domains.wakeref));
2265 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2266 seq_printf(m, "IRQs disabled: %s\n",
2267 yesno(!intel_irqs_enabled(dev_priv)));
2269 seq_printf(m, "Usage count: %d\n",
2270 atomic_read(&dev_priv->drm.dev->power.usage_count));
2272 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2274 seq_printf(m, "PCI device power state: %s [%d]\n",
2275 pci_power_name(pdev->current_state),
2276 pdev->current_state);
2278 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2279 struct drm_printer p = drm_seq_file_printer(m);
2281 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2287 static int i915_power_domain_info(struct seq_file *m, void *unused)
2289 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2290 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2293 mutex_lock(&power_domains->lock);
2295 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2296 for (i = 0; i < power_domains->power_well_count; i++) {
2297 struct i915_power_well *power_well;
2298 enum intel_display_power_domain power_domain;
2300 power_well = &power_domains->power_wells[i];
2301 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2304 for_each_power_domain(power_domain, power_well->desc->domains)
2305 seq_printf(m, " %-23s %d\n",
2306 intel_display_power_domain_str(power_domain),
2307 power_domains->domain_use_count[power_domain]);
2310 mutex_unlock(&power_domains->lock);
2315 static int i915_dmc_info(struct seq_file *m, void *unused)
2317 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2318 intel_wakeref_t wakeref;
2319 struct intel_csr *csr;
2320 i915_reg_t dc5_reg, dc6_reg = {};
2322 if (!HAS_CSR(dev_priv))
2325 csr = &dev_priv->csr;
2327 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2329 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2330 seq_printf(m, "path: %s\n", csr->fw_path);
2332 if (!csr->dmc_payload)
2335 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2336 CSR_VERSION_MINOR(csr->version));
2338 if (INTEL_GEN(dev_priv) >= 12) {
2339 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2340 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2342 * NOTE: DMC_DEBUG3 is a general purpose register.
2343 * According to B.Specs:49196, the DMC f/w reuses the DC5/6 counter
2344 * register for DC3CO debugging and validation,
2345 * but the TGL DMC f/w uses the DMC_DEBUG3 register as the DC3CO counter.
2347 seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
2349 dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2350 SKL_CSR_DC3_DC5_COUNT;
2351 if (!IS_GEN9_LP(dev_priv))
2352 dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2355 seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2357 seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2360 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2361 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2362 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2364 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2369 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2370 struct drm_display_mode *mode)
2374 for (i = 0; i < tabs; i++)
2377 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2380 static void intel_encoder_info(struct seq_file *m,
2381 struct intel_crtc *intel_crtc,
2382 struct intel_encoder *intel_encoder)
2384 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2385 struct drm_device *dev = &dev_priv->drm;
2386 struct drm_crtc *crtc = &intel_crtc->base;
2387 struct intel_connector *intel_connector;
2388 struct drm_encoder *encoder;
2390 encoder = &intel_encoder->base;
2391 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2392 encoder->base.id, encoder->name);
2393 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2394 struct drm_connector *connector = &intel_connector->base;
2395 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2398 drm_get_connector_status_name(connector->status));
2399 if (connector->status == connector_status_connected) {
2400 struct drm_display_mode *mode = &crtc->mode;
2401 seq_printf(m, ", mode:\n");
2402 intel_seq_print_mode(m, 2, mode);
2409 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2411 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2412 struct drm_device *dev = &dev_priv->drm;
2413 struct drm_crtc *crtc = &intel_crtc->base;
2414 struct intel_encoder *intel_encoder;
2415 struct drm_plane_state *plane_state = crtc->primary->state;
2416 struct drm_framebuffer *fb = plane_state->fb;
2419 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2420 fb->base.id, plane_state->src_x >> 16,
2421 plane_state->src_y >> 16, fb->width, fb->height);
2423 seq_puts(m, "\tprimary plane disabled\n");
2424 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2425 intel_encoder_info(m, intel_crtc, intel_encoder);
2428 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2430 struct drm_display_mode *mode = panel->fixed_mode;
2432 seq_printf(m, "\tfixed mode:\n");
2433 intel_seq_print_mode(m, 2, mode);
2436 static void intel_hdcp_info(struct seq_file *m,
2437 struct intel_connector *intel_connector)
2439 bool hdcp_cap, hdcp2_cap;
2441 hdcp_cap = intel_hdcp_capable(intel_connector);
2442 hdcp2_cap = intel_hdcp2_capable(intel_connector);
2445 seq_puts(m, "HDCP1.4 ");
2447 seq_puts(m, "HDCP2.2 ");
2449 if (!hdcp_cap && !hdcp2_cap)
2450 seq_puts(m, "None");
2455 static void intel_dp_info(struct seq_file *m,
2456 struct intel_connector *intel_connector)
2458 struct intel_encoder *intel_encoder = intel_connector->encoder;
2459 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2461 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2462 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2463 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2464 intel_panel_info(m, &intel_connector->panel);
2466 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2468 if (intel_connector->hdcp.shim) {
2469 seq_puts(m, "\tHDCP version: ");
2470 intel_hdcp_info(m, intel_connector);
2474 static void intel_dp_mst_info(struct seq_file *m,
2475 struct intel_connector *intel_connector)
2477 struct intel_encoder *intel_encoder = intel_connector->encoder;
2478 struct intel_dp_mst_encoder *intel_mst =
2479 enc_to_mst(&intel_encoder->base);
2480 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2481 struct intel_dp *intel_dp = &intel_dig_port->dp;
2482 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2483 intel_connector->port);
2485 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2488 static void intel_hdmi_info(struct seq_file *m,
2489 struct intel_connector *intel_connector)
2491 struct intel_encoder *intel_encoder = intel_connector->encoder;
2492 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2494 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2495 if (intel_connector->hdcp.shim) {
2496 seq_puts(m, "\tHDCP version: ");
2497 intel_hdcp_info(m, intel_connector);
2501 static void intel_lvds_info(struct seq_file *m,
2502 struct intel_connector *intel_connector)
2504 intel_panel_info(m, &intel_connector->panel);
2507 static void intel_connector_info(struct seq_file *m,
2508 struct drm_connector *connector)
2510 struct intel_connector *intel_connector = to_intel_connector(connector);
2511 struct intel_encoder *intel_encoder = intel_connector->encoder;
2512 struct drm_display_mode *mode;
2514 seq_printf(m, "connector %d: type %s, status: %s\n",
2515 connector->base.id, connector->name,
2516 drm_get_connector_status_name(connector->status));
2518 if (connector->status == connector_status_disconnected)
2521 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2522 connector->display_info.width_mm,
2523 connector->display_info.height_mm);
2524 seq_printf(m, "\tsubpixel order: %s\n",
2525 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2526 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2531 switch (connector->connector_type) {
2532 case DRM_MODE_CONNECTOR_DisplayPort:
2533 case DRM_MODE_CONNECTOR_eDP:
2534 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2535 intel_dp_mst_info(m, intel_connector);
2537 intel_dp_info(m, intel_connector);
2539 case DRM_MODE_CONNECTOR_LVDS:
2540 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2541 intel_lvds_info(m, intel_connector);
2543 case DRM_MODE_CONNECTOR_HDMIA:
2544 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2545 intel_encoder->type == INTEL_OUTPUT_DDI)
2546 intel_hdmi_info(m, intel_connector);
2552 seq_printf(m, "\tmodes:\n");
2553 list_for_each_entry(mode, &connector->modes, head)
2554 intel_seq_print_mode(m, 2, mode);
2557 static const char *plane_type(enum drm_plane_type type)
2560 case DRM_PLANE_TYPE_OVERLAY:
2562 case DRM_PLANE_TYPE_PRIMARY:
2564 case DRM_PLANE_TYPE_CURSOR:
2567 * Deliberately omitting default: to generate compiler warnings
2568 * when a new drm_plane_type gets added.
2575 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2578 * According to the documentation only one DRM_MODE_ROTATE_ value is
2579 * allowed, but print them all so that any misuse of the bits is visible
2581 snprintf(buf, bufsize,
2582 "%s%s%s%s%s%s(0x%08x)",
2583 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2584 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2585 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2586 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2587 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2588 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
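/*
 * Example of the helper above catching misuse: a mask with two rotation
 * bits set, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 == 0x5, is rendered
 * as "0 180 (0x00000005)", so the conflicting bits are easy to spot.
 * Illustrative only:
 */
#if 0
static void plane_rotation_example(void)
{
	char buf[64];

	plane_rotation(buf, sizeof(buf),
		       DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180);
	/* buf now holds "0 180 (0x00000005)" */
}
#endif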
2592 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2594 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2595 struct drm_device *dev = &dev_priv->drm;
2596 struct intel_plane *intel_plane;
2598 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2599 struct drm_plane_state *state;
2600 struct drm_plane *plane = &intel_plane->base;
2601 struct drm_format_name_buf format_name;
2604 if (!plane->state) {
2605 seq_puts(m, "plane->state is NULL!\n");
2609 state = plane->state;
2612 drm_get_format_name(state->fb->format->format,
2615 sprintf(format_name.str, "N/A");
2618 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2620 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2622 plane_type(intel_plane->base.type),
2623 state->crtc_x, state->crtc_y,
2624 state->crtc_w, state->crtc_h,
2625 (state->src_x >> 16),
2626 ((state->src_x & 0xffff) * 15625) >> 10,
2627 (state->src_y >> 16),
2628 ((state->src_y & 0xffff) * 15625) >> 10,
2629 (state->src_w >> 16),
2630 ((state->src_w & 0xffff) * 15625) >> 10,
2631 (state->src_h >> 16),
2632 ((state->src_h & 0xffff) * 15625) >> 10,
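/*
 * The src_* values above are 16.16 fixed point; the fraction is printed
 * in millionths because (frac * 15625) >> 10 == frac * 1000000 / 65536
 * (15625 / 1024 is 1000000 / 65536 reduced). Illustrative sketch:
 */
#if 0
static void src_coord_example(void)
{
	u32 v = 0x00018000;				/* 1.5 in 16.16 */
	u32 ipart = v >> 16;				/* 1 */
	u32 fpart = ((v & 0xffff) * 15625) >> 10;	/* 500000 */

	/* printed via "%d.%04u" as "1.500000" */
}
#endif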
2638 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2640 struct intel_crtc_state *pipe_config;
2641 int num_scalers = intel_crtc->num_scalers;
2644 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2646 /* Not all platforms have a scaler */
2648 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2650 pipe_config->scaler_state.scaler_users,
2651 pipe_config->scaler_state.scaler_id);
2653 for (i = 0; i < num_scalers; i++) {
2654 struct intel_scaler *sc =
2655 &pipe_config->scaler_state.scalers[i];
2657 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2658 i, yesno(sc->in_use), sc->mode);
2662 seq_puts(m, "\tNo scalers available on this platform\n");
2666 static int i915_display_info(struct seq_file *m, void *unused)
2668 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2669 struct drm_device *dev = &dev_priv->drm;
2670 struct intel_crtc *crtc;
2671 struct drm_connector *connector;
2672 struct drm_connector_list_iter conn_iter;
2673 intel_wakeref_t wakeref;
2675 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2677 seq_printf(m, "CRTC info\n");
2678 seq_printf(m, "---------\n");
2679 for_each_intel_crtc(dev, crtc) {
2680 struct intel_crtc_state *pipe_config;
2682 drm_modeset_lock(&crtc->base.mutex, NULL);
2683 pipe_config = to_intel_crtc_state(crtc->base.state);
2685 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2686 crtc->base.base.id, pipe_name(crtc->pipe),
2687 yesno(pipe_config->base.active),
2688 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2689 yesno(pipe_config->dither), pipe_config->pipe_bpp);
2691 if (pipe_config->base.active) {
2692 struct intel_plane *cursor =
2693 to_intel_plane(crtc->base.cursor);
2695 intel_crtc_info(m, crtc);
2697 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2698 yesno(cursor->base.state->visible),
2699 cursor->base.state->crtc_x,
2700 cursor->base.state->crtc_y,
2701 cursor->base.state->crtc_w,
2702 cursor->base.state->crtc_h,
2703 cursor->cursor.base);
2704 intel_scaler_info(m, crtc);
2705 intel_plane_info(m, crtc);
2708 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2709 yesno(!crtc->cpu_fifo_underrun_disabled),
2710 yesno(!crtc->pch_fifo_underrun_disabled));
2711 drm_modeset_unlock(&crtc->base.mutex);
2714 seq_printf(m, "\n");
2715 seq_printf(m, "Connector info\n");
2716 seq_printf(m, "--------------\n");
2717 mutex_lock(&dev->mode_config.mutex);
2718 drm_connector_list_iter_begin(dev, &conn_iter);
2719 drm_for_each_connector_iter(connector, &conn_iter)
2720 intel_connector_info(m, connector);
2721 drm_connector_list_iter_end(&conn_iter);
2722 mutex_unlock(&dev->mode_config.mutex);
2724 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2729 static int i915_engine_info(struct seq_file *m, void *unused)
2731 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2732 struct intel_engine_cs *engine;
2733 intel_wakeref_t wakeref;
2734 struct drm_printer p;
2736 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2738 seq_printf(m, "GT awake? %s [%d]\n",
2739 yesno(dev_priv->gt.awake),
2740 atomic_read(&dev_priv->gt.wakeref.count));
2741 seq_printf(m, "CS timestamp frequency: %u kHz\n",
2742 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2744 p = drm_seq_file_printer(m);
2745 for_each_uabi_engine(engine, dev_priv)
2746 intel_engine_dump(engine, &p, "%s\n", engine->name);
2748 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
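/*
 * The wakeref bracketing used by this and most other dumpers in this
 * file: the device may be runtime suspended, so register access must be
 * wrapped in a runtime-PM get/put pair. A minimal sketch of the same
 * pattern (the register being read is purely illustrative):
 */
#if 0
static u32 read_reg_with_rpm(struct drm_i915_private *i915, i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		val = intel_uncore_read(&i915->uncore, reg);

	return val;
}
#endif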
2753 static int i915_rcs_topology(struct seq_file *m, void *unused)
2755 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2756 struct drm_printer p = drm_seq_file_printer(m);
2758 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2763 static int i915_shrinker_info(struct seq_file *m, void *unused)
2765 struct drm_i915_private *i915 = node_to_i915(m->private);
2767 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2768 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2773 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2775 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2776 struct drm_device *dev = &dev_priv->drm;
2779 drm_modeset_lock_all(dev);
2780 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2781 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2783 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2785 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2786 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2787 seq_printf(m, " tracked hardware state:\n");
2788 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
2789 seq_printf(m, " dpll_md: 0x%08x\n",
2790 pll->state.hw_state.dpll_md);
2791 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2792 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2793 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
2794 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2795 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2796 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2797 pll->state.hw_state.mg_refclkin_ctl);
2798 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2799 pll->state.hw_state.mg_clktop2_coreclkctl1);
2800 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2801 pll->state.hw_state.mg_clktop2_hsclkctl);
2802 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2803 pll->state.hw_state.mg_pll_div0);
2804 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2805 pll->state.hw_state.mg_pll_div1);
2806 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2807 pll->state.hw_state.mg_pll_lf);
2808 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2809 pll->state.hw_state.mg_pll_frac_lock);
2810 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2811 pll->state.hw_state.mg_pll_ssc);
2812 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2813 pll->state.hw_state.mg_pll_bias);
2814 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2815 pll->state.hw_state.mg_pll_tdc_coldst_bias);
2817 drm_modeset_unlock_all(dev);
2822 static int i915_wa_registers(struct seq_file *m, void *unused)
2824 struct drm_i915_private *i915 = node_to_i915(m->private);
2825 struct intel_engine_cs *engine;
2827 for_each_uabi_engine(engine, i915) {
2828 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2829 const struct i915_wa *wa;
2836 seq_printf(m, "%s: Workarounds applied: %u\n",
2837 engine->name, count);
2839 for (wa = wal->list; count--; wa++)
2840 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2841 i915_mmio_reg_offset(wa->reg),
2844 seq_printf(m, "\n");
2850 static int i915_ipc_status_show(struct seq_file *m, void *data)
2852 struct drm_i915_private *dev_priv = m->private;
2854 seq_printf(m, "Isochronous Priority Control: %s\n",
2855 yesno(dev_priv->ipc_enabled));
2859 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2861 struct drm_i915_private *dev_priv = inode->i_private;
2863 if (!HAS_IPC(dev_priv))
2866 return single_open(file, i915_ipc_status_show, dev_priv);
2869 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2870 size_t len, loff_t *offp)
2872 struct seq_file *m = file->private_data;
2873 struct drm_i915_private *dev_priv = m->private;
2874 intel_wakeref_t wakeref;
2878 ret = kstrtobool_from_user(ubuf, len, &enable);
2882 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2883 if (!dev_priv->ipc_enabled && enable)
2884 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2885 dev_priv->wm.distrust_bios_wm = true;
2886 dev_priv->ipc_enabled = enable;
2887 intel_enable_ipc(dev_priv);
2893 static const struct file_operations i915_ipc_status_fops = {
2894 .owner = THIS_MODULE,
2895 .open = i915_ipc_status_open,
2897 .llseek = seq_lseek,
2898 .release = single_release,
2899 .write = i915_ipc_status_write
2902 static int i915_ddb_info(struct seq_file *m, void *unused)
2904 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2905 struct drm_device *dev = &dev_priv->drm;
2906 struct skl_ddb_entry *entry;
2907 struct intel_crtc *crtc;
2909 if (INTEL_GEN(dev_priv) < 9)
2912 drm_modeset_lock_all(dev);
2914 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2916 for_each_intel_crtc(&dev_priv->drm, crtc) {
2917 struct intel_crtc_state *crtc_state =
2918 to_intel_crtc_state(crtc->base.state);
2919 enum pipe pipe = crtc->pipe;
2920 enum plane_id plane_id;
2922 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2924 for_each_plane_id_on_crtc(crtc, plane_id) {
2925 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2926 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
2927 entry->start, entry->end,
2928 skl_ddb_entry_size(entry));
2931 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2932 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
2933 entry->end, skl_ddb_entry_size(entry));
2936 drm_modeset_unlock_all(dev);
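/*
 * For reference, the arithmetic behind the "Size" column above: each
 * skl_ddb_entry describes a [start, end) range of DDB blocks, so the
 * size is simply the difference. A sketch of what skl_ddb_entry_size()
 * is assumed to compute:
 */
#if 0
static u16 ddb_entry_size_sketch(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;	/* end is exclusive */
}
#endif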
2941 static void drrs_status_per_crtc(struct seq_file *m,
2942 struct drm_device *dev,
2943 struct intel_crtc *intel_crtc)
2945 struct drm_i915_private *dev_priv = to_i915(dev);
2946 struct i915_drrs *drrs = &dev_priv->drrs;
2948 struct drm_connector *connector;
2949 struct drm_connector_list_iter conn_iter;
2951 drm_connector_list_iter_begin(dev, &conn_iter);
2952 drm_for_each_connector_iter(connector, &conn_iter) {
2953 if (connector->state->crtc != &intel_crtc->base)
2956 seq_printf(m, "%s:\n", connector->name);
2958 drm_connector_list_iter_end(&conn_iter);
2960 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2961 seq_puts(m, "\tVBT: DRRS_type: Static");
2962 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2963 seq_puts(m, "\tVBT: DRRS_type: Seamless");
2964 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2965 seq_puts(m, "\tVBT: DRRS_type: None");
2967 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2969 seq_puts(m, "\n\n");
2971 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
2972 struct intel_panel *panel;
2974 mutex_lock(&drrs->mutex);
2975 /* DRRS Supported */
2976 seq_puts(m, "\tDRRS Supported: Yes\n");
2978 /* disable_drrs() will make drrs->dp NULL */
2980 seq_puts(m, "Idleness DRRS: Disabled\n");
2981 if (dev_priv->psr.enabled)
2983 "\tAs PSR is enabled, DRRS is not enabled\n");
2984 mutex_unlock(&drrs->mutex);
2988 panel = &drrs->dp->attached_connector->panel;
2989 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2990 drrs->busy_frontbuffer_bits);
2992 seq_puts(m, "\n\t\t");
2993 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2994 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2995 vrefresh = panel->fixed_mode->vrefresh;
2996 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2997 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2998 vrefresh = panel->downclock_mode->vrefresh;
3000 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3001 drrs->refresh_rate_type);
3002 mutex_unlock(&drrs->mutex);
3005 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3007 seq_puts(m, "\n\t\t");
3008 mutex_unlock(&drrs->mutex);
3010 /* DRRS not supported. Print the VBT parameter */
3011 seq_puts(m, "\tDRRS Supported: No");
3016 static int i915_drrs_status(struct seq_file *m, void *unused)
3018 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3019 struct drm_device *dev = &dev_priv->drm;
3020 struct intel_crtc *intel_crtc;
3021 int active_crtc_cnt = 0;
3023 drm_modeset_lock_all(dev);
3024 for_each_intel_crtc(dev, intel_crtc) {
3025 if (intel_crtc->base.state->active) {
3027 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3029 drrs_status_per_crtc(m, dev, intel_crtc);
3032 drm_modeset_unlock_all(dev);
3034 if (!active_crtc_cnt)
3035 seq_puts(m, "No active crtc found\n");
3040 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3042 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3043 struct drm_device *dev = &dev_priv->drm;
3044 struct intel_encoder *intel_encoder;
3045 struct intel_digital_port *intel_dig_port;
3046 struct drm_connector *connector;
3047 struct drm_connector_list_iter conn_iter;
3049 drm_connector_list_iter_begin(dev, &conn_iter);
3050 drm_for_each_connector_iter(connector, &conn_iter) {
3051 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3054 intel_encoder = intel_attached_encoder(connector);
3055 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3058 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3059 if (!intel_dig_port->dp.can_mst)
3062 seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3063 intel_dig_port->base.base.base.id,
3064 intel_dig_port->base.base.name);
3065 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3067 drm_connector_list_iter_end(&conn_iter);
3072 static ssize_t i915_displayport_test_active_write(struct file *file,
3073 const char __user *ubuf,
3074 size_t len, loff_t *offp)
3078 struct drm_device *dev;
3079 struct drm_connector *connector;
3080 struct drm_connector_list_iter conn_iter;
3081 struct intel_dp *intel_dp;
3084 dev = ((struct seq_file *)file->private_data)->private;
3089 input_buffer = memdup_user_nul(ubuf, len);
3090 if (IS_ERR(input_buffer))
3091 return PTR_ERR(input_buffer);
3093 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3095 drm_connector_list_iter_begin(dev, &conn_iter);
3096 drm_for_each_connector_iter(connector, &conn_iter) {
3097 struct intel_encoder *encoder;
3099 if (connector->connector_type !=
3100 DRM_MODE_CONNECTOR_DisplayPort)
3103 encoder = to_intel_encoder(connector->encoder);
3104 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3107 if (encoder && connector->status == connector_status_connected) {
3108 intel_dp = enc_to_intel_dp(&encoder->base);
3109 status = kstrtoint(input_buffer, 10, &val);
3112 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3113 /* To prevent erroneous activation of the compliance
3114 * testing code, only accept an actual value of 1 here
3117 intel_dp->compliance.test_active = 1;
3119 intel_dp->compliance.test_active = 0;
3122 drm_connector_list_iter_end(&conn_iter);
3123 kfree(input_buffer);
3131 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3133 struct drm_i915_private *dev_priv = m->private;
3134 struct drm_device *dev = &dev_priv->drm;
3135 struct drm_connector *connector;
3136 struct drm_connector_list_iter conn_iter;
3137 struct intel_dp *intel_dp;
3139 drm_connector_list_iter_begin(dev, &conn_iter);
3140 drm_for_each_connector_iter(connector, &conn_iter) {
3141 struct intel_encoder *encoder;
3143 if (connector->connector_type !=
3144 DRM_MODE_CONNECTOR_DisplayPort)
3147 encoder = to_intel_encoder(connector->encoder);
3148 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3151 if (encoder && connector->status == connector_status_connected) {
3152 intel_dp = enc_to_intel_dp(&encoder->base);
3153 if (intel_dp->compliance.test_active)
3160 drm_connector_list_iter_end(&conn_iter);
3165 static int i915_displayport_test_active_open(struct inode *inode,
3168 return single_open(file, i915_displayport_test_active_show,
3172 static const struct file_operations i915_displayport_test_active_fops = {
3173 .owner = THIS_MODULE,
3174 .open = i915_displayport_test_active_open,
3176 .llseek = seq_lseek,
3177 .release = single_release,
3178 .write = i915_displayport_test_active_write
3181 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3183 struct drm_i915_private *dev_priv = m->private;
3184 struct drm_device *dev = &dev_priv->drm;
3185 struct drm_connector *connector;
3186 struct drm_connector_list_iter conn_iter;
3187 struct intel_dp *intel_dp;
3189 drm_connector_list_iter_begin(dev, &conn_iter);
3190 drm_for_each_connector_iter(connector, &conn_iter) {
3191 struct intel_encoder *encoder;
3193 if (connector->connector_type !=
3194 DRM_MODE_CONNECTOR_DisplayPort)
3197 encoder = to_intel_encoder(connector->encoder);
3198 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3201 if (encoder && connector->status == connector_status_connected) {
3202 intel_dp = enc_to_intel_dp(&encoder->base);
3203 if (intel_dp->compliance.test_type ==
3204 DP_TEST_LINK_EDID_READ)
3205 seq_printf(m, "%lx",
3206 intel_dp->compliance.test_data.edid);
3207 else if (intel_dp->compliance.test_type ==
3208 DP_TEST_LINK_VIDEO_PATTERN) {
3209 seq_printf(m, "hdisplay: %d\n",
3210 intel_dp->compliance.test_data.hdisplay);
3211 seq_printf(m, "vdisplay: %d\n",
3212 intel_dp->compliance.test_data.vdisplay);
3213 seq_printf(m, "bpc: %u\n",
3214 intel_dp->compliance.test_data.bpc);
3219 drm_connector_list_iter_end(&conn_iter);
3223 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3225 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3227 struct drm_i915_private *dev_priv = m->private;
3228 struct drm_device *dev = &dev_priv->drm;
3229 struct drm_connector *connector;
3230 struct drm_connector_list_iter conn_iter;
3231 struct intel_dp *intel_dp;
3233 drm_connector_list_iter_begin(dev, &conn_iter);
3234 drm_for_each_connector_iter(connector, &conn_iter) {
3235 struct intel_encoder *encoder;
3237 if (connector->connector_type !=
3238 DRM_MODE_CONNECTOR_DisplayPort)
3241 encoder = to_intel_encoder(connector->encoder);
3242 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3245 if (encoder && connector->status == connector_status_connected) {
3246 intel_dp = enc_to_intel_dp(&encoder->base);
3247 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3251 drm_connector_list_iter_end(&conn_iter);
3255 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
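/*
 * DEFINE_SHOW_ATTRIBUTE() (from <linux/seq_file.h>) generates the
 * single_open() boilerplate for a read-only seq_file. Roughly, the
 * attribute above expands to:
 */
#if 0
static int i915_displayport_test_type_open(struct inode *inode,
					   struct file *file)
{
	return single_open(file, i915_displayport_test_type_show,
			   inode->i_private);
}

static const struct file_operations i915_displayport_test_type_fops = {
	.owner		= THIS_MODULE,
	.open		= i915_displayport_test_type_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif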
3257 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3259 struct drm_i915_private *dev_priv = m->private;
3260 struct drm_device *dev = &dev_priv->drm;
3264 if (IS_CHERRYVIEW(dev_priv))
3266 else if (IS_VALLEYVIEW(dev_priv))
3268 else if (IS_G4X(dev_priv))
3271 num_levels = ilk_wm_max_level(dev_priv) + 1;
3273 drm_modeset_lock_all(dev);
3275 for (level = 0; level < num_levels; level++) {
3276 unsigned int latency = wm[level];
3279 * - WM1+ latency values in 0.5us units
3280 * - latencies are in us on gen9/vlv/chv
3282 if (INTEL_GEN(dev_priv) >= 9 ||
3283 IS_VALLEYVIEW(dev_priv) ||
3284 IS_CHERRYVIEW(dev_priv) ||
3290 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3291 level, wm[level], latency / 10, latency % 10);
3294 drm_modeset_unlock_all(dev);
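/*
 * Worked example of the unit handling above (the scaling itself is in
 * the elided lines; the *5 / *10 factors here assume the usual
 * convention): latencies are printed in tenths of a microsecond. On
 * pre-gen9 big-core parts WM1+ values are stored in 0.5 us units, so
 * wm[1] == 4 becomes latency = 4 * 5 = 20 and prints as
 * "WM1 4 (2.0 usec)"; on gen9+/vlv/chv/g4x the stored value is already
 * in microseconds and is scaled by 10 instead.
 */
#if 0
static unsigned int wm_latency_in_tenths_us(u16 raw, bool half_us_units)
{
	return raw * (half_us_units ? 5 : 10);
}
#endif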
3297 static int pri_wm_latency_show(struct seq_file *m, void *data)
3299 struct drm_i915_private *dev_priv = m->private;
3300 const u16 *latencies;
3302 if (INTEL_GEN(dev_priv) >= 9)
3303 latencies = dev_priv->wm.skl_latency;
3305 latencies = dev_priv->wm.pri_latency;
3307 wm_latency_show(m, latencies);
3312 static int spr_wm_latency_show(struct seq_file *m, void *data)
3314 struct drm_i915_private *dev_priv = m->private;
3315 const u16 *latencies;
3317 if (INTEL_GEN(dev_priv) >= 9)
3318 latencies = dev_priv->wm.skl_latency;
3320 latencies = dev_priv->wm.spr_latency;
3322 wm_latency_show(m, latencies);
3327 static int cur_wm_latency_show(struct seq_file *m, void *data)
3329 struct drm_i915_private *dev_priv = m->private;
3330 const u16 *latencies;
3332 if (INTEL_GEN(dev_priv) >= 9)
3333 latencies = dev_priv->wm.skl_latency;
3335 latencies = dev_priv->wm.cur_latency;
3337 wm_latency_show(m, latencies);
3342 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3344 struct drm_i915_private *dev_priv = inode->i_private;
3346 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3349 return single_open(file, pri_wm_latency_show, dev_priv);
3352 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3354 struct drm_i915_private *dev_priv = inode->i_private;
3356 if (HAS_GMCH(dev_priv))
3359 return single_open(file, spr_wm_latency_show, dev_priv);
3362 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3364 struct drm_i915_private *dev_priv = inode->i_private;
3366 if (HAS_GMCH(dev_priv))
3369 return single_open(file, cur_wm_latency_show, dev_priv);
3372 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3373 size_t len, loff_t *offp, u16 wm[8])
3375 struct seq_file *m = file->private_data;
3376 struct drm_i915_private *dev_priv = m->private;
3377 struct drm_device *dev = &dev_priv->drm;
3384 if (IS_CHERRYVIEW(dev_priv))
3386 else if (IS_VALLEYVIEW(dev_priv))
3388 else if (IS_G4X(dev_priv))
3391 num_levels = ilk_wm_max_level(dev_priv) + 1;
3393 if (len >= sizeof(tmp))
3396 if (copy_from_user(tmp, ubuf, len))
3401 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3402 &new[0], &new[1], &new[2], &new[3],
3403 &new[4], &new[5], &new[6], &new[7]);
3404 if (ret != num_levels)
3407 drm_modeset_lock_all(dev);
3409 for (level = 0; level < num_levels; level++)
3410 wm[level] = new[level];
3412 drm_modeset_unlock_all(dev);
3418 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3419 size_t len, loff_t *offp)
3421 struct seq_file *m = file->private_data;
3422 struct drm_i915_private *dev_priv = m->private;
3425 if (INTEL_GEN(dev_priv) >= 9)
3426 latencies = dev_priv->wm.skl_latency;
3428 latencies = dev_priv->wm.pri_latency;
3430 return wm_latency_write(file, ubuf, len, offp, latencies);
3433 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3434 size_t len, loff_t *offp)
3436 struct seq_file *m = file->private_data;
3437 struct drm_i915_private *dev_priv = m->private;
3440 if (INTEL_GEN(dev_priv) >= 9)
3441 latencies = dev_priv->wm.skl_latency;
3443 latencies = dev_priv->wm.spr_latency;
3445 return wm_latency_write(file, ubuf, len, offp, latencies);
3448 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3449 size_t len, loff_t *offp)
3451 struct seq_file *m = file->private_data;
3452 struct drm_i915_private *dev_priv = m->private;
3455 if (INTEL_GEN(dev_priv) >= 9)
3456 latencies = dev_priv->wm.skl_latency;
3458 latencies = dev_priv->wm.cur_latency;
3460 return wm_latency_write(file, ubuf, len, offp, latencies);
3463 static const struct file_operations i915_pri_wm_latency_fops = {
3464 .owner = THIS_MODULE,
3465 .open = pri_wm_latency_open,
3467 .llseek = seq_lseek,
3468 .release = single_release,
3469 .write = pri_wm_latency_write
3472 static const struct file_operations i915_spr_wm_latency_fops = {
3473 .owner = THIS_MODULE,
3474 .open = spr_wm_latency_open,
3476 .llseek = seq_lseek,
3477 .release = single_release,
3478 .write = spr_wm_latency_write
3481 static const struct file_operations i915_cur_wm_latency_fops = {
3482 .owner = THIS_MODULE,
3483 .open = cur_wm_latency_open,
3485 .llseek = seq_lseek,
3486 .release = single_release,
3487 .write = cur_wm_latency_write
3491 i915_wedged_get(void *data, u64 *val)
3493 struct drm_i915_private *i915 = data;
3494 int ret = intel_gt_terminally_wedged(&i915->gt);
3509 i915_wedged_set(void *data, u64 val)
3511 struct drm_i915_private *i915 = data;
3513 /* Flush any previous reset before applying for a new one */
3514 wait_event(i915->gt.reset.queue,
3515 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3517 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3518 "Manually set wedged engine mask = %llx", val);
3522 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3523 i915_wedged_get, i915_wedged_set,
3527 i915_perf_noa_delay_set(void *data, u64 val)
3529 struct drm_i915_private *i915 = data;
3530 const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
3533 * This would lead to infinite waits as we're doing a timestamp
3534 * difference on the CS with only 32 bits.
3536 if (val > mul_u32_u32(U32_MAX, clk))
3539 atomic64_set(&i915->perf.noa_programming_delay, val);
3544 i915_perf_noa_delay_get(void *data, u64 *val)
3546 struct drm_i915_private *i915 = data;
3548 *val = atomic64_read(&i915->perf.noa_programming_delay);
3552 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
3553 i915_perf_noa_delay_get,
3554 i915_perf_noa_delay_set,
3557 #define DROP_UNBOUND BIT(0)
3558 #define DROP_BOUND BIT(1)
3559 #define DROP_RETIRE BIT(2)
3560 #define DROP_ACTIVE BIT(3)
3561 #define DROP_FREED BIT(4)
3562 #define DROP_SHRINK_ALL BIT(5)
3563 #define DROP_IDLE BIT(6)
3564 #define DROP_RESET_ACTIVE BIT(7)
3565 #define DROP_RESET_SEQNO BIT(8)
3566 #define DROP_RCU BIT(9)
3567 #define DROP_ALL (DROP_UNBOUND | \
3574 DROP_RESET_ACTIVE | \
3575 DROP_RESET_SEQNO | \
3578 i915_drop_caches_get(void *data, u64 *val)
3585 gt_drop_caches(struct intel_gt *gt, u64 val)
3589 if (val & DROP_RESET_ACTIVE &&
3590 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
3591 intel_gt_set_wedged(gt);
3593 if (val & DROP_RETIRE)
3594 intel_gt_retire_requests(gt);
3596 if (val & (DROP_IDLE | DROP_ACTIVE)) {
3597 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
3602 if (val & DROP_IDLE) {
3603 ret = intel_gt_pm_wait_for_idle(gt);
3608 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
3609 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
3615 i915_drop_caches_set(void *data, u64 val)
3617 struct drm_i915_private *i915 = data;
3620 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3621 val, val & DROP_ALL);
3623 ret = gt_drop_caches(&i915->gt, val);
3627 fs_reclaim_acquire(GFP_KERNEL);
3628 if (val & DROP_BOUND)
3629 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3631 if (val & DROP_UNBOUND)
3632 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3634 if (val & DROP_SHRINK_ALL)
3635 i915_gem_shrink_all(i915);
3636 fs_reclaim_release(GFP_KERNEL);
3641 if (val & DROP_FREED)
3642 i915_gem_drain_freed_objects(i915);
3647 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3648 i915_drop_caches_get, i915_drop_caches_set,
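/*
 * Illustrative userspace use of the attribute above: write a DROP_*
 * bitmask to the debugfs file (simple_attr_write() parses base-0
 * integers, so "0x..." works). The path and minor number are
 * assumptions about a typical setup; 0x3ff covers the ten flag bits
 * defined above.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void drop_all_caches(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches",
		      O_WRONLY);

	if (fd < 0)
		return;
	(void)write(fd, "0x3ff", 5);	/* bits 0..9, i.e. DROP_ALL */
	close(fd);
}
#endif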
3652 i915_cache_sharing_get(void *data, u64 *val)
3654 struct drm_i915_private *dev_priv = data;
3655 intel_wakeref_t wakeref;
3658 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3661 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3662 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3664 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3670 i915_cache_sharing_set(void *data, u64 val)
3672 struct drm_i915_private *dev_priv = data;
3673 intel_wakeref_t wakeref;
3675 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3681 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3682 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3685 /* Update the cache sharing policy here as well */
3686 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3687 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3688 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3689 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3696 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
3699 int offset = slice * sseu->ss_stride;
3701 memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
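/*
 * Layout note for the helper above: subslice_mask is a flat byte array
 * with ss_stride bytes per slice, so slice s occupies bytes
 * [s * ss_stride, (s + 1) * ss_stride). A sketch of testing a single
 * subslice bit under that layout (mirroring the gen9 indexing below):
 */
#if 0
static bool subslice_enabled(const u8 *subslice_mask, u8 ss_stride,
			     int slice, int subslice)
{
	int offset = slice * ss_stride + subslice / BITS_PER_BYTE;

	return subslice_mask[offset] & BIT(subslice % BITS_PER_BYTE);
}
#endif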
3704 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3705 i915_cache_sharing_get, i915_cache_sharing_set,
3708 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3709 struct sseu_dev_info *sseu)
3712 const int ss_max = SS_MAX;
3713 u32 sig1[SS_MAX], sig2[SS_MAX];
3716 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3717 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3718 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3719 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3721 for (ss = 0; ss < ss_max; ss++) {
3722 unsigned int eu_cnt;
3724 if (sig1[ss] & CHV_SS_PG_ENABLE)
3725 /* skip disabled subslice */
3728 sseu->slice_mask = BIT(0);
3729 sseu->subslice_mask[0] |= BIT(ss);
3730 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3731 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3732 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3733 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3734 sseu->eu_total += eu_cnt;
3735 sseu->eu_per_subslice = max_t(unsigned int,
3736 sseu->eu_per_subslice, eu_cnt);
3741 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3742 struct sseu_dev_info *sseu)
3745 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3746 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3749 for (s = 0; s < info->sseu.max_slices; s++) {
3751 * FIXME: Valid SS Mask respects the spec and reads
3752 * only the valid bits for those registers, excluding reserved ones,
3753 * although this seems wrong because it would leave many
3754 * subslices without an ACK.
3756 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3757 GEN10_PGCTL_VALID_SS_MASK(s);
3758 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3759 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3762 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3763 GEN9_PGCTL_SSA_EU19_ACK |
3764 GEN9_PGCTL_SSA_EU210_ACK |
3765 GEN9_PGCTL_SSA_EU311_ACK;
3766 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3767 GEN9_PGCTL_SSB_EU19_ACK |
3768 GEN9_PGCTL_SSB_EU210_ACK |
3769 GEN9_PGCTL_SSB_EU311_ACK;
3771 for (s = 0; s < info->sseu.max_slices; s++) {
3772 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3773 /* skip disabled slice */
3776 sseu->slice_mask |= BIT(s);
3777 intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
3779 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3780 unsigned int eu_cnt;
3782 if (info->sseu.has_subslice_pg &&
3783 !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3784 /* skip disabled subslice */
3787 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3789 sseu->eu_total += eu_cnt;
3790 sseu->eu_per_subslice = max_t(unsigned int,
3791 sseu->eu_per_subslice,
3798 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3799 struct sseu_dev_info *sseu)
3802 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3803 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3806 for (s = 0; s < info->sseu.max_slices; s++) {
3807 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3808 eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3809 eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3812 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3813 GEN9_PGCTL_SSA_EU19_ACK |
3814 GEN9_PGCTL_SSA_EU210_ACK |
3815 GEN9_PGCTL_SSA_EU311_ACK;
3816 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3817 GEN9_PGCTL_SSB_EU19_ACK |
3818 GEN9_PGCTL_SSB_EU210_ACK |
3819 GEN9_PGCTL_SSB_EU311_ACK;
3821 for (s = 0; s < info->sseu.max_slices; s++) {
3822 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3823 /* skip disabled slice */
3826 sseu->slice_mask |= BIT(s);
3828 if (IS_GEN9_BC(dev_priv))
3829 intel_sseu_copy_subslices(&info->sseu, s,
3830 sseu->subslice_mask);
3832 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3833 unsigned int eu_cnt;
3834 u8 ss_idx = s * info->sseu.ss_stride +
3837 if (IS_GEN9_LP(dev_priv)) {
3838 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3839 /* skip disabled subslice */
3842 sseu->subslice_mask[ss_idx] |=
3843 BIT(ss % BITS_PER_BYTE);
3846 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3848 sseu->eu_total += eu_cnt;
3849 sseu->eu_per_subslice = max_t(unsigned int,
3850 sseu->eu_per_subslice,
3857 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3858 struct sseu_dev_info *sseu)
3860 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3861 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3864 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3866 if (sseu->slice_mask) {
3867 sseu->eu_per_subslice = info->sseu.eu_per_subslice;
3868 for (s = 0; s < fls(sseu->slice_mask); s++)
3869 intel_sseu_copy_subslices(&info->sseu, s,
3870 sseu->subslice_mask);
3871 sseu->eu_total = sseu->eu_per_subslice *
3872 intel_sseu_subslice_total(sseu);
3874 /* subtract fused off EU(s) from enabled slice(s) */
3875 for (s = 0; s < fls(sseu->slice_mask); s++) {
3876 u8 subslice_7eu = info->sseu.subslice_7eu[s];
3878 sseu->eu_total -= hweight8(subslice_7eu);
3883 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3884 const struct sseu_dev_info *sseu)
3886 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3887 const char *type = is_available_info ? "Available" : "Enabled";
3890 seq_printf(m, " %s Slice Mask: %04x\n", type,
3892 seq_printf(m, " %s Slice Total: %u\n", type,
3893 hweight8(sseu->slice_mask));
3894 seq_printf(m, " %s Subslice Total: %u\n", type,
3895 intel_sseu_subslice_total(sseu));
3896 for (s = 0; s < fls(sseu->slice_mask); s++) {
3897 seq_printf(m, " %s Slice%i subslices: %u\n", type,
3898 s, intel_sseu_subslices_per_slice(sseu, s));
3900 seq_printf(m, " %s EU Total: %u\n", type,
3902 seq_printf(m, " %s EU Per Subslice: %u\n", type,
3903 sseu->eu_per_subslice);
3905 if (!is_available_info)
3908 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3909 if (HAS_POOLED_EU(dev_priv))
3910 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
3912 seq_printf(m, " Has Slice Power Gating: %s\n",
3913 yesno(sseu->has_slice_pg));
3914 seq_printf(m, " Has Subslice Power Gating: %s\n",
3915 yesno(sseu->has_subslice_pg));
3916 seq_printf(m, " Has EU Power Gating: %s\n",
3917 yesno(sseu->has_eu_pg));
3920 static int i915_sseu_status(struct seq_file *m, void *unused)
3922 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3923 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3924 struct sseu_dev_info sseu;
3925 intel_wakeref_t wakeref;
3927 if (INTEL_GEN(dev_priv) < 8)
3930 seq_puts(m, "SSEU Device Info\n");
3931 i915_print_sseu_info(m, true, &info->sseu);
3933 seq_puts(m, "SSEU Device Status\n");
3934 memset(&sseu, 0, sizeof(sseu));
3935 intel_sseu_set_info(&sseu, info->sseu.max_slices,
3936 info->sseu.max_subslices,
3937 info->sseu.max_eus_per_subslice);
3939 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3940 if (IS_CHERRYVIEW(dev_priv))
3941 cherryview_sseu_device_status(dev_priv, &sseu);
3942 else if (IS_BROADWELL(dev_priv))
3943 broadwell_sseu_device_status(dev_priv, &sseu);
3944 else if (IS_GEN(dev_priv, 9))
3945 gen9_sseu_device_status(dev_priv, &sseu);
3946 else if (INTEL_GEN(dev_priv) >= 10)
3947 gen10_sseu_device_status(dev_priv, &sseu);
3950 i915_print_sseu_info(m, false, &sseu);
3955 static int i915_forcewake_open(struct inode *inode, struct file *file)
3957 struct drm_i915_private *i915 = inode->i_private;
3958 struct intel_gt *gt = &i915->gt;
3960 atomic_inc(&gt->user_wakeref);
3961 intel_gt_pm_get(gt);
3962 if (INTEL_GEN(i915) >= 6)
3963 intel_uncore_forcewake_user_get(gt->uncore);
3968 static int i915_forcewake_release(struct inode *inode, struct file *file)
3970 struct drm_i915_private *i915 = inode->i_private;
3971 struct intel_gt *gt = &i915->gt;
3973 if (INTEL_GEN(i915) >= 6)
3974 intel_uncore_forcewake_user_put(gt->uncore);
3975 intel_gt_pm_put(gt);
3976 atomic_dec(&gt->user_wakeref);
3981 static const struct file_operations i915_forcewake_fops = {
3982 .owner = THIS_MODULE,
3983 .open = i915_forcewake_open,
3984 .release = i915_forcewake_release,
3987 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
3989 struct drm_i915_private *dev_priv = m->private;
3990 struct i915_hotplug *hotplug = &dev_priv->hotplug;
3992 /* Synchronize with everything first in case there's been an HPD
3993 * storm, but we haven't finished handling it in the kernel yet
3995 intel_synchronize_irq(dev_priv);
3996 flush_work(&dev_priv->hotplug.dig_port_work);
3997 flush_delayed_work(&dev_priv->hotplug.hotplug_work);
3999 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4000 seq_printf(m, "Detected: %s\n",
4001 yesno(delayed_work_pending(&hotplug->reenable_work)));
4006 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4007 const char __user *ubuf, size_t len,
4010 struct seq_file *m = file->private_data;
4011 struct drm_i915_private *dev_priv = m->private;
4012 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4013 unsigned int new_threshold;
4018 if (len >= sizeof(tmp))
4021 if (copy_from_user(tmp, ubuf, len))
4026 /* Strip newline, if any */
4027 newline = strchr(tmp, '\n');
4031 if (strcmp(tmp, "reset") == 0)
4032 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4033 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4036 if (new_threshold > 0)
4037 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4040 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4042 spin_lock_irq(&dev_priv->irq_lock);
4043 hotplug->hpd_storm_threshold = new_threshold;
4044 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4046 hotplug->stats[i].count = 0;
4047 spin_unlock_irq(&dev_priv->irq_lock);
4049 /* Re-enable hpd immediately if we were in an irq storm */
4050 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4055 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4057 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4060 static const struct file_operations i915_hpd_storm_ctl_fops = {
4061 .owner = THIS_MODULE,
4062 .open = i915_hpd_storm_ctl_open,
4064 .llseek = seq_lseek,
4065 .release = single_release,
4066 .write = i915_hpd_storm_ctl_write
4069 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4071 struct drm_i915_private *dev_priv = m->private;
4073 seq_printf(m, "Enabled: %s\n",
4074 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4080 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4082 return single_open(file, i915_hpd_short_storm_ctl_show,
4086 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4087 const char __user *ubuf,
4088 size_t len, loff_t *offp)
4090 struct seq_file *m = file->private_data;
4091 struct drm_i915_private *dev_priv = m->private;
4092 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4098 if (len >= sizeof(tmp))
4101 if (copy_from_user(tmp, ubuf, len))
4106 /* Strip newline, if any */
4107 newline = strchr(tmp, '\n');
4111 /* Reset to the "default" state for this system */
4112 if (strcmp(tmp, "reset") == 0)
4113 new_state = !HAS_DP_MST(dev_priv);
4114 else if (kstrtobool(tmp, &new_state) != 0)
4117 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4118 new_state ? "En" : "Dis");
4120 spin_lock_irq(&dev_priv->irq_lock);
4121 hotplug->hpd_short_storm_enabled = new_state;
4122 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4124 hotplug->stats[i].count = 0;
4125 spin_unlock_irq(&dev_priv->irq_lock);
4127 /* Re-enable hpd immediately if we were in an irq storm */
4128 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4133 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4134 .owner = THIS_MODULE,
4135 .open = i915_hpd_short_storm_ctl_open,
4137 .llseek = seq_lseek,
4138 .release = single_release,
4139 .write = i915_hpd_short_storm_ctl_write,
4142 static int i915_drrs_ctl_set(void *data, u64 val)
4144 struct drm_i915_private *dev_priv = data;
4145 struct drm_device *dev = &dev_priv->drm;
4146 struct intel_crtc *crtc;
4148 if (INTEL_GEN(dev_priv) < 7)
4151 for_each_intel_crtc(dev, crtc) {
4152 struct drm_connector_list_iter conn_iter;
4153 struct intel_crtc_state *crtc_state;
4154 struct drm_connector *connector;
4155 struct drm_crtc_commit *commit;
4158 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4162 crtc_state = to_intel_crtc_state(crtc->base.state);
4164 if (!crtc_state->base.active ||
4165 !crtc_state->has_drrs)
4168 commit = crtc_state->base.commit;
4170 ret = wait_for_completion_interruptible(&commit->hw_done);
4175 drm_connector_list_iter_begin(dev, &conn_iter);
4176 drm_for_each_connector_iter(connector, &conn_iter) {
4177 struct intel_encoder *encoder;
4178 struct intel_dp *intel_dp;
4180 if (!(crtc_state->base.connector_mask &
4181 drm_connector_mask(connector)))
4184 encoder = intel_attached_encoder(connector);
4185 if (encoder->type != INTEL_OUTPUT_EDP)
4188 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4189 val ? "en" : "dis", val);
4191 intel_dp = enc_to_intel_dp(&encoder->base);
4193 intel_edp_drrs_enable(intel_dp,
4196 intel_edp_drrs_disable(intel_dp,
4199 drm_connector_list_iter_end(&conn_iter);
4202 drm_modeset_unlock(&crtc->base.mutex);
4210 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
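/*
 * DEFINE_SIMPLE_ATTRIBUTE() (from <linux/fs.h>) wires a u64 get/set
 * pair into file_operations via the simple_attr helpers. Roughly, the
 * write-only attribute above expands to:
 */
#if 0
static int i915_drrs_ctl_fops_open(struct inode *inode, struct file *file)
{
	return simple_attr_open(inode, file, NULL, i915_drrs_ctl_set,
				"%llu\n");
}

static const struct file_operations i915_drrs_ctl_fops = {
	.owner	 = THIS_MODULE,
	.open	 = i915_drrs_ctl_fops_open,
	.release = simple_attr_release,
	.read	 = simple_attr_read,
	.write	 = simple_attr_write,
	.llseek	 = generic_file_llseek,
};
#endif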
4213 i915_fifo_underrun_reset_write(struct file *filp,
4214 const char __user *ubuf,
4215 size_t cnt, loff_t *ppos)
4217 struct drm_i915_private *dev_priv = filp->private_data;
4218 struct intel_crtc *intel_crtc;
4219 struct drm_device *dev = &dev_priv->drm;
4223 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4230 for_each_intel_crtc(dev, intel_crtc) {
4231 struct drm_crtc_commit *commit;
4232 struct intel_crtc_state *crtc_state;
4234 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4238 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4239 commit = crtc_state->base.commit;
4241 ret = wait_for_completion_interruptible(&commit->hw_done);
4243 ret = wait_for_completion_interruptible(&commit->flip_done);
4246 if (!ret && crtc_state->base.active) {
4247 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4248 pipe_name(intel_crtc->pipe));
4250 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4253 drm_modeset_unlock(&intel_crtc->base.mutex);
4259 ret = intel_fbc_reset_underrun(dev_priv);
4266 static const struct file_operations i915_fifo_underrun_reset_ops = {
4267 .owner = THIS_MODULE,
4268 .open = simple_open,
4269 .write = i915_fifo_underrun_reset_write,
4270 .llseek = default_llseek,

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};

#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
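
/*
 * All of the nodes above land under the DRM minor's debugfs root,
 * typically /sys/kernel/debug/dri/<minor>/. For example, assuming
 * debugfs is mounted at /sys/kernel/debug and the device is minor 0:
 *
 *   cat /sys/kernel/debug/dri/0/i915_capabilities
 */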

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
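
/*
 * Reading the per-connector node dumps each DPCD block above as an
 * "<offset>: <hex bytes>" line. Usage sketch, assuming debugfs at
 * /sys/kernel/debug, DRM minor 0 and a connector named DP-1:
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dpcd
 */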

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_panel);
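
/*
 * Exposed as "i915_panel_timings" on eDP connectors (see
 * i915_debugfs_connector_add() below); reading it prints the raw panel
 * power and backlight delay values tracked in struct intel_dp.
 */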

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is only supported if the connector registered a shim. */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
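
/*
 * Usage sketch, assuming debugfs at /sys/kernel/debug, DRM minor 0 and a
 * connector named HDMI-A-1:
 *
 *   cat /sys/kernel/debug/dri/0/HDMI-A-1/i915_hdcp_sink_capability
 *
 * This only reports anything useful for a connected sink whose connector
 * has an HDCP shim registered.
 */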

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
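
/*
 * Note on the loop above: drm_modeset_lock() returns -EDEADLK when the
 * acquisition would deadlock against another acquire context, so the
 * do/while retries after drm_modeset_backoff() has dropped the contended
 * locks. This is the standard wound/wait dance for taking both
 * connection_mutex and the CRTC mutex outside an atomic commit.
 */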

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
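
/*
 * Usage sketch, assuming debugfs at /sys/kernel/debug, DRM minor 0 and a
 * connector named DP-1: reading reports DSC/FEC support for the active
 * CRTC, writing a boolean sets force_dsc_en, which takes effect on a
 * subsequent modeset:
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */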

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}