]> Git Repo - linux.git/blame - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: Always backoff after a drm_modeset_lock() deadlock
[linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
CommitLineData
2017263e
BG
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <[email protected]>
25 * Keith Packard <[email protected]>
26 *
27 */
28
e637d2cb 29#include <linux/sort.h>
d92a8cfc 30#include <linux/sched/mm.h>
fcd70cd3
SV
31#include <drm/drm_debugfs.h>
32#include <drm/drm_fourcc.h>
4e5359cd 33#include "intel_drv.h"
a2695744 34#include "intel_guc_submission.h"
2017263e 35
9f58892e
CW
36#include "i915_reset.h"
37
36cdd013
DW
38static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
39{
40 return to_i915(node->minor->dev);
41}
42
70d39fe4
CW
43static int i915_capabilities(struct seq_file *m, void *data)
44{
36cdd013
DW
45 struct drm_i915_private *dev_priv = node_to_i915(m->private);
46 const struct intel_device_info *info = INTEL_INFO(dev_priv);
a8c9b849 47 struct drm_printer p = drm_seq_file_printer(m);
70d39fe4 48
36cdd013 49 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
2e0d26f8 50 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
36cdd013 51 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
418e3cd8 52
a8c9b849 53 intel_device_info_dump_flags(info, &p);
0258404f 54 intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
3fed1808 55 intel_driver_caps_print(&dev_priv->caps, &p);
70d39fe4 56
418e3cd8 57 kernel_param_lock(THIS_MODULE);
acfb9973 58 i915_params_dump(&i915_modparams, &p);
418e3cd8
CW
59 kernel_param_unlock(THIS_MODULE);
60
70d39fe4
CW
61 return 0;
62}
2017263e 63
/* '*' when the object is active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_is_active(obj) ? '*' : ' ';
}
68
a7363de7 69static char get_pin_flag(struct drm_i915_gem_object *obj)
be12a86b 70{
bd3d2252 71 return obj->pin_global ? 'p' : ' ';
be12a86b
TU
72}
73
a7363de7 74static char get_tiling_flag(struct drm_i915_gem_object *obj)
a6172a80 75{
3e510a8e 76 switch (i915_gem_object_get_tiling(obj)) {
0206e353 77 default:
be12a86b
TU
78 case I915_TILING_NONE: return ' ';
79 case I915_TILING_X: return 'X';
80 case I915_TILING_Y: return 'Y';
0206e353 81 }
a6172a80
CW
82}
83
a7363de7 84static char get_global_flag(struct drm_i915_gem_object *obj)
be12a86b 85{
a65adaf8 86 return obj->userfault_count ? 'g' : ' ';
be12a86b
TU
87}
88
a7363de7 89static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
1d693bcc 90{
a4f5ea64 91 return obj->mm.mapping ? 'M' : ' ';
1d693bcc
BW
92}
93
ca1543be
TU
94static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
95{
96 u64 size = 0;
97 struct i915_vma *vma;
98
e2189dd0
CW
99 for_each_ggtt_vma(vma, obj) {
100 if (drm_mm_node_allocated(&vma->node))
ca1543be
TU
101 size += vma->node.size;
102 }
103
104 return size;
105}
106
7393b7ee
MA
107static const char *
108stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
109{
110 size_t x = 0;
111
112 switch (page_sizes) {
113 case 0:
114 return "";
115 case I915_GTT_PAGE_SIZE_4K:
116 return "4K";
117 case I915_GTT_PAGE_SIZE_64K:
118 return "64K";
119 case I915_GTT_PAGE_SIZE_2M:
120 return "2M";
121 default:
122 if (!buf)
123 return "M";
124
125 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
126 x += snprintf(buf + x, len - x, "2M, ");
127 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
128 x += snprintf(buf + x, len - x, "64K, ");
129 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
130 x += snprintf(buf + x, len - x, "4K, ");
131 buf[x-2] = '\0';
132
133 return buf;
134 }
135}
136
37811fcc
CW
137static void
138describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
139{
b4716185 140 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
e2f80391 141 struct intel_engine_cs *engine;
1d693bcc 142 struct i915_vma *vma;
faf5bf0a 143 unsigned int frontbuffer_bits;
d7f46fc4
BW
144 int pin_count = 0;
145
188c1ab7
CW
146 lockdep_assert_held(&obj->base.dev->struct_mutex);
147
d07f0e59 148 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
37811fcc 149 &obj->base,
be12a86b 150 get_active_flag(obj),
37811fcc
CW
151 get_pin_flag(obj),
152 get_tiling_flag(obj),
1d693bcc 153 get_global_flag(obj),
be12a86b 154 get_pin_mapped_flag(obj),
a05a5862 155 obj->base.size / 1024,
c0a51fd0
CK
156 obj->read_domains,
157 obj->write_domain,
36cdd013 158 i915_cache_level_str(dev_priv, obj->cache_level),
a4f5ea64
CW
159 obj->mm.dirty ? " dirty" : "",
160 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
37811fcc
CW
161 if (obj->base.name)
162 seq_printf(m, " (name: %d)", obj->base.name);
528cbd17 163 list_for_each_entry(vma, &obj->vma.list, obj_link) {
20dfbde4 164 if (i915_vma_is_pinned(vma))
d7f46fc4 165 pin_count++;
ba0635ff
DC
166 }
167 seq_printf(m, " (pinned x %d)", pin_count);
bd3d2252
CW
168 if (obj->pin_global)
169 seq_printf(m, " (global)");
528cbd17 170 list_for_each_entry(vma, &obj->vma.list, obj_link) {
15717de2
CW
171 if (!drm_mm_node_allocated(&vma->node))
172 continue;
173
7393b7ee 174 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
3272db53 175 i915_vma_is_ggtt(vma) ? "g" : "pp",
7393b7ee
MA
176 vma->node.start, vma->node.size,
177 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
21976853
CW
178 if (i915_vma_is_ggtt(vma)) {
179 switch (vma->ggtt_view.type) {
180 case I915_GGTT_VIEW_NORMAL:
181 seq_puts(m, ", normal");
182 break;
183
184 case I915_GGTT_VIEW_PARTIAL:
185 seq_printf(m, ", partial [%08llx+%x]",
8bab1193
CW
186 vma->ggtt_view.partial.offset << PAGE_SHIFT,
187 vma->ggtt_view.partial.size << PAGE_SHIFT);
21976853
CW
188 break;
189
190 case I915_GGTT_VIEW_ROTATED:
191 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
8bab1193
CW
192 vma->ggtt_view.rotated.plane[0].width,
193 vma->ggtt_view.rotated.plane[0].height,
194 vma->ggtt_view.rotated.plane[0].stride,
195 vma->ggtt_view.rotated.plane[0].offset,
196 vma->ggtt_view.rotated.plane[1].width,
197 vma->ggtt_view.rotated.plane[1].height,
198 vma->ggtt_view.rotated.plane[1].stride,
199 vma->ggtt_view.rotated.plane[1].offset);
21976853
CW
200 break;
201
202 default:
203 MISSING_CASE(vma->ggtt_view.type);
204 break;
205 }
206 }
49ef5294
CW
207 if (vma->fence)
208 seq_printf(m, " , fence: %d%s",
209 vma->fence->id,
21950ee7 210 i915_active_request_isset(&vma->last_fence) ? "*" : "");
596c5923 211 seq_puts(m, ")");
1d693bcc 212 }
c1ad11fc 213 if (obj->stolen)
440fd528 214 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
27c01aae 215
d07f0e59 216 engine = i915_gem_object_last_write_engine(obj);
27c01aae
CW
217 if (engine)
218 seq_printf(m, " (%s)", engine->name);
219
faf5bf0a
CW
220 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
221 if (frontbuffer_bits)
222 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
37811fcc
CW
223}
224
e637d2cb 225static int obj_rank_by_stolen(const void *A, const void *B)
6d2b8885 226{
e637d2cb
CW
227 const struct drm_i915_gem_object *a =
228 *(const struct drm_i915_gem_object **)A;
229 const struct drm_i915_gem_object *b =
230 *(const struct drm_i915_gem_object **)B;
6d2b8885 231
2d05fa16
RV
232 if (a->stolen->start < b->stolen->start)
233 return -1;
234 if (a->stolen->start > b->stolen->start)
235 return 1;
236 return 0;
6d2b8885
CW
237}
238
239static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
240{
36cdd013
DW
241 struct drm_i915_private *dev_priv = node_to_i915(m->private);
242 struct drm_device *dev = &dev_priv->drm;
e637d2cb 243 struct drm_i915_gem_object **objects;
6d2b8885 244 struct drm_i915_gem_object *obj;
c44ef60e 245 u64 total_obj_size, total_gtt_size;
e637d2cb
CW
246 unsigned long total, count, n;
247 int ret;
248
249 total = READ_ONCE(dev_priv->mm.object_count);
2098105e 250 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
e637d2cb
CW
251 if (!objects)
252 return -ENOMEM;
6d2b8885
CW
253
254 ret = mutex_lock_interruptible(&dev->struct_mutex);
255 if (ret)
e637d2cb 256 goto out;
6d2b8885
CW
257
258 total_obj_size = total_gtt_size = count = 0;
f2123818
CW
259
260 spin_lock(&dev_priv->mm.obj_lock);
261 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
e637d2cb
CW
262 if (count == total)
263 break;
264
6d2b8885
CW
265 if (obj->stolen == NULL)
266 continue;
267
e637d2cb 268 objects[count++] = obj;
6d2b8885 269 total_obj_size += obj->base.size;
ca1543be 270 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
e637d2cb 271
6d2b8885 272 }
f2123818 273 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
e637d2cb
CW
274 if (count == total)
275 break;
276
6d2b8885
CW
277 if (obj->stolen == NULL)
278 continue;
279
e637d2cb 280 objects[count++] = obj;
6d2b8885 281 total_obj_size += obj->base.size;
6d2b8885 282 }
f2123818 283 spin_unlock(&dev_priv->mm.obj_lock);
e637d2cb
CW
284
285 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
286
6d2b8885 287 seq_puts(m, "Stolen:\n");
e637d2cb 288 for (n = 0; n < count; n++) {
6d2b8885 289 seq_puts(m, " ");
e637d2cb 290 describe_obj(m, objects[n]);
6d2b8885 291 seq_putc(m, '\n');
6d2b8885 292 }
e637d2cb 293 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
6d2b8885 294 count, total_obj_size, total_gtt_size);
e637d2cb
CW
295
296 mutex_unlock(&dev->struct_mutex);
297out:
2098105e 298 kvfree(objects);
e637d2cb 299 return ret;
6d2b8885
CW
300}
301
2db8e9d6 302struct file_stats {
f6e8aa38 303 struct i915_address_space *vm;
c44ef60e
MK
304 unsigned long count;
305 u64 total, unbound;
306 u64 global, shared;
307 u64 active, inactive;
f6e8aa38 308 u64 closed;
2db8e9d6
CW
309};
310
311static int per_file_stats(int id, void *ptr, void *data)
312{
313 struct drm_i915_gem_object *obj = ptr;
314 struct file_stats *stats = data;
6313c204 315 struct i915_vma *vma;
2db8e9d6 316
0caf81b5
CW
317 lockdep_assert_held(&obj->base.dev->struct_mutex);
318
2db8e9d6
CW
319 stats->count++;
320 stats->total += obj->base.size;
15717de2
CW
321 if (!obj->bind_count)
322 stats->unbound += obj->base.size;
c67a17e9
CW
323 if (obj->base.name || obj->base.dma_buf)
324 stats->shared += obj->base.size;
325
528cbd17 326 list_for_each_entry(vma, &obj->vma.list, obj_link) {
894eeecc
CW
327 if (!drm_mm_node_allocated(&vma->node))
328 continue;
6313c204 329
3272db53 330 if (i915_vma_is_ggtt(vma)) {
894eeecc
CW
331 stats->global += vma->node.size;
332 } else {
f6e8aa38 333 if (vma->vm != stats->vm)
6313c204 334 continue;
6313c204 335 }
894eeecc 336
b0decaf7 337 if (i915_vma_is_active(vma))
894eeecc
CW
338 stats->active += vma->node.size;
339 else
340 stats->inactive += vma->node.size;
f6e8aa38
CW
341
342 if (i915_vma_is_closed(vma))
343 stats->closed += vma->node.size;
2db8e9d6
CW
344 }
345
346 return 0;
347}
348
b0da1b79
CW
/* Emit one summary line for a file_stats tally; silent when empty. */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
493018dc
BV
362
363static void print_batch_pool_stats(struct seq_file *m,
364 struct drm_i915_private *dev_priv)
365{
366 struct drm_i915_gem_object *obj;
e2f80391 367 struct intel_engine_cs *engine;
f6e8aa38 368 struct file_stats stats = {};
3b3f1650 369 enum intel_engine_id id;
b4ac5afc 370 int j;
493018dc 371
3b3f1650 372 for_each_engine(engine, dev_priv, id) {
e2f80391 373 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744 374 list_for_each_entry(obj,
e2f80391 375 &engine->batch_pool.cache_list[j],
8d9d5744
CW
376 batch_pool_link)
377 per_file_stats(0, obj, &stats);
378 }
06fbca71 379 }
493018dc 380
b0da1b79 381 print_file_stats(m, "[k]batch pool", stats);
493018dc
BV
382}
383
f6e8aa38
CW
384static void print_context_stats(struct seq_file *m,
385 struct drm_i915_private *i915)
15da9565 386{
f6e8aa38
CW
387 struct file_stats kstats = {};
388 struct i915_gem_context *ctx;
ab82a063 389
f6e8aa38
CW
390 list_for_each_entry(ctx, &i915->contexts.list, link) {
391 struct intel_engine_cs *engine;
392 enum intel_engine_id id;
15da9565 393
f6e8aa38
CW
394 for_each_engine(engine, i915, id) {
395 struct intel_context *ce = to_intel_context(ctx, engine);
15da9565 396
f6e8aa38
CW
397 if (ce->state)
398 per_file_stats(0, ce->state->obj, &kstats);
399 if (ce->ring)
400 per_file_stats(0, ce->ring->vma->obj, &kstats);
401 }
15da9565 402
f6e8aa38
CW
403 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
404 struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
405 struct drm_file *file = ctx->file_priv->file;
406 struct task_struct *task;
407 char name[80];
15da9565 408
f6e8aa38
CW
409 spin_lock(&file->table_lock);
410 idr_for_each(&file->object_idr, per_file_stats, &stats);
411 spin_unlock(&file->table_lock);
15da9565 412
f6e8aa38
CW
413 rcu_read_lock();
414 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
415 snprintf(name, sizeof(name), "%s/%d",
416 task ? task->comm : "<unknown>",
417 ctx->user_handle);
418 rcu_read_unlock();
15da9565 419
f6e8aa38
CW
420 print_file_stats(m, name, stats);
421 }
15da9565 422 }
15da9565 423
f6e8aa38 424 print_file_stats(m, "[k]contexts", kstats);
15da9565
CW
425}
426
36cdd013 427static int i915_gem_object_info(struct seq_file *m, void *data)
73aa808f 428{
36cdd013
DW
429 struct drm_i915_private *dev_priv = node_to_i915(m->private);
430 struct drm_device *dev = &dev_priv->drm;
72e96d64 431 struct i915_ggtt *ggtt = &dev_priv->ggtt;
7393b7ee
MA
432 u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
433 u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
6299f992 434 struct drm_i915_gem_object *obj;
7393b7ee 435 unsigned int page_sizes = 0;
7393b7ee 436 char buf[80];
73aa808f
CW
437 int ret;
438
3ef7f228 439 seq_printf(m, "%u objects, %llu bytes\n",
6299f992
CW
440 dev_priv->mm.object_count,
441 dev_priv->mm.object_memory);
442
1544c42e
CW
443 size = count = 0;
444 mapped_size = mapped_count = 0;
445 purgeable_size = purgeable_count = 0;
7393b7ee 446 huge_size = huge_count = 0;
f2123818
CW
447
448 spin_lock(&dev_priv->mm.obj_lock);
449 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
2bd160a1
CW
450 size += obj->base.size;
451 ++count;
452
a4f5ea64 453 if (obj->mm.madv == I915_MADV_DONTNEED) {
2bd160a1
CW
454 purgeable_size += obj->base.size;
455 ++purgeable_count;
456 }
457
a4f5ea64 458 if (obj->mm.mapping) {
2bd160a1
CW
459 mapped_count++;
460 mapped_size += obj->base.size;
be19b10d 461 }
7393b7ee
MA
462
463 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
464 huge_count++;
465 huge_size += obj->base.size;
466 page_sizes |= obj->mm.page_sizes.sg;
467 }
b7abb714 468 }
c44ef60e 469 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
6c085a72 470
2bd160a1 471 size = count = dpy_size = dpy_count = 0;
f2123818 472 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
2bd160a1
CW
473 size += obj->base.size;
474 ++count;
475
bd3d2252 476 if (obj->pin_global) {
2bd160a1
CW
477 dpy_size += obj->base.size;
478 ++dpy_count;
6299f992 479 }
2bd160a1 480
a4f5ea64 481 if (obj->mm.madv == I915_MADV_DONTNEED) {
b7abb714
CW
482 purgeable_size += obj->base.size;
483 ++purgeable_count;
484 }
2bd160a1 485
a4f5ea64 486 if (obj->mm.mapping) {
2bd160a1
CW
487 mapped_count++;
488 mapped_size += obj->base.size;
be19b10d 489 }
7393b7ee
MA
490
491 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
492 huge_count++;
493 huge_size += obj->base.size;
494 page_sizes |= obj->mm.page_sizes.sg;
495 }
6299f992 496 }
f2123818
CW
497 spin_unlock(&dev_priv->mm.obj_lock);
498
2bd160a1
CW
499 seq_printf(m, "%u bound objects, %llu bytes\n",
500 count, size);
c44ef60e 501 seq_printf(m, "%u purgeable objects, %llu bytes\n",
b7abb714 502 purgeable_count, purgeable_size);
2bd160a1
CW
503 seq_printf(m, "%u mapped objects, %llu bytes\n",
504 mapped_count, mapped_size);
7393b7ee
MA
505 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
506 huge_count,
507 stringify_page_sizes(page_sizes, buf, sizeof(buf)),
508 huge_size);
bd3d2252 509 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
2bd160a1 510 dpy_count, dpy_size);
6299f992 511
b7128ef1 512 seq_printf(m, "%llu [%pa] gtt total\n",
82ad6443 513 ggtt->vm.total, &ggtt->mappable_end);
7393b7ee
MA
514 seq_printf(m, "Supported page sizes: %s\n",
515 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
516 buf, sizeof(buf)));
73aa808f 517
493018dc 518 seq_putc(m, '\n');
1d2ac403 519
f6e8aa38
CW
520 ret = mutex_lock_interruptible(&dev->struct_mutex);
521 if (ret)
522 return ret;
523
524 print_batch_pool_stats(m, dev_priv);
15da9565 525 print_context_stats(m, dev_priv);
f6e8aa38 526 mutex_unlock(&dev->struct_mutex);
73aa808f
CW
527
528 return 0;
529}
530
aee56cff 531static int i915_gem_gtt_info(struct seq_file *m, void *data)
08c18323 532{
9f25d007 533 struct drm_info_node *node = m->private;
36cdd013
DW
534 struct drm_i915_private *dev_priv = node_to_i915(node);
535 struct drm_device *dev = &dev_priv->drm;
f2123818 536 struct drm_i915_gem_object **objects;
08c18323 537 struct drm_i915_gem_object *obj;
c44ef60e 538 u64 total_obj_size, total_gtt_size;
f2123818 539 unsigned long nobject, n;
08c18323
CW
540 int count, ret;
541
f2123818
CW
542 nobject = READ_ONCE(dev_priv->mm.object_count);
543 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
544 if (!objects)
545 return -ENOMEM;
546
08c18323
CW
547 ret = mutex_lock_interruptible(&dev->struct_mutex);
548 if (ret)
549 return ret;
550
f2123818
CW
551 count = 0;
552 spin_lock(&dev_priv->mm.obj_lock);
553 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
554 objects[count++] = obj;
555 if (count == nobject)
556 break;
557 }
558 spin_unlock(&dev_priv->mm.obj_lock);
559
560 total_obj_size = total_gtt_size = 0;
561 for (n = 0; n < count; n++) {
562 obj = objects[n];
563
267f0c90 564 seq_puts(m, " ");
08c18323 565 describe_obj(m, obj);
267f0c90 566 seq_putc(m, '\n');
08c18323 567 total_obj_size += obj->base.size;
ca1543be 568 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
08c18323
CW
569 }
570
571 mutex_unlock(&dev->struct_mutex);
572
c44ef60e 573 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
08c18323 574 count, total_obj_size, total_gtt_size);
f2123818 575 kvfree(objects);
08c18323
CW
576
577 return 0;
578}
579
493018dc
BV
580static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
581{
36cdd013
DW
582 struct drm_i915_private *dev_priv = node_to_i915(m->private);
583 struct drm_device *dev = &dev_priv->drm;
493018dc 584 struct drm_i915_gem_object *obj;
e2f80391 585 struct intel_engine_cs *engine;
3b3f1650 586 enum intel_engine_id id;
8d9d5744 587 int total = 0;
b4ac5afc 588 int ret, j;
493018dc
BV
589
590 ret = mutex_lock_interruptible(&dev->struct_mutex);
591 if (ret)
592 return ret;
593
3b3f1650 594 for_each_engine(engine, dev_priv, id) {
e2f80391 595 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
8d9d5744
CW
596 int count;
597
598 count = 0;
599 list_for_each_entry(obj,
e2f80391 600 &engine->batch_pool.cache_list[j],
8d9d5744
CW
601 batch_pool_link)
602 count++;
603 seq_printf(m, "%s cache[%d]: %d objects\n",
e2f80391 604 engine->name, j, count);
8d9d5744
CW
605
606 list_for_each_entry(obj,
e2f80391 607 &engine->batch_pool.cache_list[j],
8d9d5744
CW
608 batch_pool_link) {
609 seq_puts(m, " ");
610 describe_obj(m, obj);
611 seq_putc(m, '\n');
612 }
613
614 total += count;
06fbca71 615 }
493018dc
BV
616 }
617
8d9d5744 618 seq_printf(m, "total: %d\n", total);
493018dc
BV
619
620 mutex_unlock(&dev->struct_mutex);
621
622 return 0;
623}
624
80d89350
TU
625static void gen8_display_interrupt_info(struct seq_file *m)
626{
627 struct drm_i915_private *dev_priv = node_to_i915(m->private);
628 int pipe;
629
630 for_each_pipe(dev_priv, pipe) {
631 enum intel_display_power_domain power_domain;
0e6e0be4 632 intel_wakeref_t wakeref;
80d89350
TU
633
634 power_domain = POWER_DOMAIN_PIPE(pipe);
0e6e0be4
CW
635 wakeref = intel_display_power_get_if_enabled(dev_priv,
636 power_domain);
637 if (!wakeref) {
80d89350
TU
638 seq_printf(m, "Pipe %c power disabled\n",
639 pipe_name(pipe));
640 continue;
641 }
642 seq_printf(m, "Pipe %c IMR:\t%08x\n",
643 pipe_name(pipe),
644 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
645 seq_printf(m, "Pipe %c IIR:\t%08x\n",
646 pipe_name(pipe),
647 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
648 seq_printf(m, "Pipe %c IER:\t%08x\n",
649 pipe_name(pipe),
650 I915_READ(GEN8_DE_PIPE_IER(pipe)));
651
0e6e0be4 652 intel_display_power_put(dev_priv, power_domain, wakeref);
80d89350
TU
653 }
654
655 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
656 I915_READ(GEN8_DE_PORT_IMR));
657 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
658 I915_READ(GEN8_DE_PORT_IIR));
659 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
660 I915_READ(GEN8_DE_PORT_IER));
661
662 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
663 I915_READ(GEN8_DE_MISC_IMR));
664 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
665 I915_READ(GEN8_DE_MISC_IIR));
666 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
667 I915_READ(GEN8_DE_MISC_IER));
668
669 seq_printf(m, "PCU interrupt mask:\t%08x\n",
670 I915_READ(GEN8_PCU_IMR));
671 seq_printf(m, "PCU interrupt identity:\t%08x\n",
672 I915_READ(GEN8_PCU_IIR));
673 seq_printf(m, "PCU interrupt enable:\t%08x\n",
674 I915_READ(GEN8_PCU_IER));
675}
676
2017263e
BG
677static int i915_interrupt_info(struct seq_file *m, void *data)
678{
36cdd013 679 struct drm_i915_private *dev_priv = node_to_i915(m->private);
e2f80391 680 struct intel_engine_cs *engine;
3b3f1650 681 enum intel_engine_id id;
a037121c 682 intel_wakeref_t wakeref;
4bb05040 683 int i, pipe;
de227ef0 684
a037121c 685 wakeref = intel_runtime_pm_get(dev_priv);
2017263e 686
36cdd013 687 if (IS_CHERRYVIEW(dev_priv)) {
0e6e0be4
CW
688 intel_wakeref_t pref;
689
74e1ca8c
VS
690 seq_printf(m, "Master Interrupt Control:\t%08x\n",
691 I915_READ(GEN8_MASTER_IRQ));
692
693 seq_printf(m, "Display IER:\t%08x\n",
694 I915_READ(VLV_IER));
695 seq_printf(m, "Display IIR:\t%08x\n",
696 I915_READ(VLV_IIR));
697 seq_printf(m, "Display IIR_RW:\t%08x\n",
698 I915_READ(VLV_IIR_RW));
699 seq_printf(m, "Display IMR:\t%08x\n",
700 I915_READ(VLV_IMR));
9c870d03
CW
701 for_each_pipe(dev_priv, pipe) {
702 enum intel_display_power_domain power_domain;
703
704 power_domain = POWER_DOMAIN_PIPE(pipe);
0e6e0be4
CW
705 pref = intel_display_power_get_if_enabled(dev_priv,
706 power_domain);
707 if (!pref) {
9c870d03
CW
708 seq_printf(m, "Pipe %c power disabled\n",
709 pipe_name(pipe));
710 continue;
711 }
712
74e1ca8c
VS
713 seq_printf(m, "Pipe %c stat:\t%08x\n",
714 pipe_name(pipe),
715 I915_READ(PIPESTAT(pipe)));
716
0e6e0be4 717 intel_display_power_put(dev_priv, power_domain, pref);
9c870d03
CW
718 }
719
0e6e0be4 720 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
74e1ca8c
VS
721 seq_printf(m, "Port hotplug:\t%08x\n",
722 I915_READ(PORT_HOTPLUG_EN));
723 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
724 I915_READ(VLV_DPFLIPSTAT));
725 seq_printf(m, "DPINVGTT:\t%08x\n",
726 I915_READ(DPINVGTT));
0e6e0be4 727 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
74e1ca8c
VS
728
729 for (i = 0; i < 4; i++) {
730 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
731 i, I915_READ(GEN8_GT_IMR(i)));
732 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
733 i, I915_READ(GEN8_GT_IIR(i)));
734 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
735 i, I915_READ(GEN8_GT_IER(i)));
736 }
737
738 seq_printf(m, "PCU interrupt mask:\t%08x\n",
739 I915_READ(GEN8_PCU_IMR));
740 seq_printf(m, "PCU interrupt identity:\t%08x\n",
741 I915_READ(GEN8_PCU_IIR));
742 seq_printf(m, "PCU interrupt enable:\t%08x\n",
743 I915_READ(GEN8_PCU_IER));
80d89350
TU
744 } else if (INTEL_GEN(dev_priv) >= 11) {
745 seq_printf(m, "Master Interrupt Control: %08x\n",
746 I915_READ(GEN11_GFX_MSTR_IRQ));
747
748 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
749 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
750 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
751 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
752 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
753 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
754 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
755 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
756 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
757 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
758 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
759 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
760
761 seq_printf(m, "Display Interrupt Control:\t%08x\n",
762 I915_READ(GEN11_DISPLAY_INT_CTL));
763
764 gen8_display_interrupt_info(m);
36cdd013 765 } else if (INTEL_GEN(dev_priv) >= 8) {
a123f157
BW
766 seq_printf(m, "Master Interrupt Control:\t%08x\n",
767 I915_READ(GEN8_MASTER_IRQ));
768
769 for (i = 0; i < 4; i++) {
770 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
771 i, I915_READ(GEN8_GT_IMR(i)));
772 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
773 i, I915_READ(GEN8_GT_IIR(i)));
774 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
775 i, I915_READ(GEN8_GT_IER(i)));
776 }
777
80d89350 778 gen8_display_interrupt_info(m);
36cdd013 779 } else if (IS_VALLEYVIEW(dev_priv)) {
7e231dbe
JB
780 seq_printf(m, "Display IER:\t%08x\n",
781 I915_READ(VLV_IER));
782 seq_printf(m, "Display IIR:\t%08x\n",
783 I915_READ(VLV_IIR));
784 seq_printf(m, "Display IIR_RW:\t%08x\n",
785 I915_READ(VLV_IIR_RW));
786 seq_printf(m, "Display IMR:\t%08x\n",
787 I915_READ(VLV_IMR));
4f4631af
CW
788 for_each_pipe(dev_priv, pipe) {
789 enum intel_display_power_domain power_domain;
0e6e0be4 790 intel_wakeref_t pref;
4f4631af
CW
791
792 power_domain = POWER_DOMAIN_PIPE(pipe);
0e6e0be4
CW
793 pref = intel_display_power_get_if_enabled(dev_priv,
794 power_domain);
795 if (!pref) {
4f4631af
CW
796 seq_printf(m, "Pipe %c power disabled\n",
797 pipe_name(pipe));
798 continue;
799 }
800
7e231dbe
JB
801 seq_printf(m, "Pipe %c stat:\t%08x\n",
802 pipe_name(pipe),
803 I915_READ(PIPESTAT(pipe)));
0e6e0be4 804 intel_display_power_put(dev_priv, power_domain, pref);
4f4631af 805 }
7e231dbe
JB
806
807 seq_printf(m, "Master IER:\t%08x\n",
808 I915_READ(VLV_MASTER_IER));
809
810 seq_printf(m, "Render IER:\t%08x\n",
811 I915_READ(GTIER));
812 seq_printf(m, "Render IIR:\t%08x\n",
813 I915_READ(GTIIR));
814 seq_printf(m, "Render IMR:\t%08x\n",
815 I915_READ(GTIMR));
816
817 seq_printf(m, "PM IER:\t\t%08x\n",
818 I915_READ(GEN6_PMIER));
819 seq_printf(m, "PM IIR:\t\t%08x\n",
820 I915_READ(GEN6_PMIIR));
821 seq_printf(m, "PM IMR:\t\t%08x\n",
822 I915_READ(GEN6_PMIMR));
823
824 seq_printf(m, "Port hotplug:\t%08x\n",
825 I915_READ(PORT_HOTPLUG_EN));
826 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
827 I915_READ(VLV_DPFLIPSTAT));
828 seq_printf(m, "DPINVGTT:\t%08x\n",
829 I915_READ(DPINVGTT));
830
36cdd013 831 } else if (!HAS_PCH_SPLIT(dev_priv)) {
5f6a1695
ZW
832 seq_printf(m, "Interrupt enable: %08x\n",
833 I915_READ(IER));
834 seq_printf(m, "Interrupt identity: %08x\n",
835 I915_READ(IIR));
836 seq_printf(m, "Interrupt mask: %08x\n",
837 I915_READ(IMR));
055e393f 838 for_each_pipe(dev_priv, pipe)
9db4a9c7
JB
839 seq_printf(m, "Pipe %c stat: %08x\n",
840 pipe_name(pipe),
841 I915_READ(PIPESTAT(pipe)));
5f6a1695
ZW
842 } else {
843 seq_printf(m, "North Display Interrupt enable: %08x\n",
844 I915_READ(DEIER));
845 seq_printf(m, "North Display Interrupt identity: %08x\n",
846 I915_READ(DEIIR));
847 seq_printf(m, "North Display Interrupt mask: %08x\n",
848 I915_READ(DEIMR));
849 seq_printf(m, "South Display Interrupt enable: %08x\n",
850 I915_READ(SDEIER));
851 seq_printf(m, "South Display Interrupt identity: %08x\n",
852 I915_READ(SDEIIR));
853 seq_printf(m, "South Display Interrupt mask: %08x\n",
854 I915_READ(SDEIMR));
855 seq_printf(m, "Graphics Interrupt enable: %08x\n",
856 I915_READ(GTIER));
857 seq_printf(m, "Graphics Interrupt identity: %08x\n",
858 I915_READ(GTIIR));
859 seq_printf(m, "Graphics Interrupt mask: %08x\n",
860 I915_READ(GTIMR));
861 }
80d89350
TU
862
863 if (INTEL_GEN(dev_priv) >= 11) {
864 seq_printf(m, "RCS Intr Mask:\t %08x\n",
865 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
866 seq_printf(m, "BCS Intr Mask:\t %08x\n",
867 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
868 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
869 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
870 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
871 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
872 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
873 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
874 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
875 I915_READ(GEN11_GUC_SG_INTR_MASK));
876 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
877 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
878 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
879 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
880 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
881 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
882
883 } else if (INTEL_GEN(dev_priv) >= 6) {
d5acadfe 884 for_each_engine(engine, dev_priv, id) {
a2c7f6fd
CW
885 seq_printf(m,
886 "Graphics Interrupt mask (%s): %08x\n",
e2f80391 887 engine->name, I915_READ_IMR(engine));
9862e600 888 }
9862e600 889 }
80d89350 890
a037121c 891 intel_runtime_pm_put(dev_priv, wakeref);
de227ef0 892
2017263e
BG
893 return 0;
894}
895
a6172a80
CW
896static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
897{
36cdd013
DW
898 struct drm_i915_private *dev_priv = node_to_i915(m->private);
899 struct drm_device *dev = &dev_priv->drm;
de227ef0
CW
900 int i, ret;
901
902 ret = mutex_lock_interruptible(&dev->struct_mutex);
903 if (ret)
904 return ret;
a6172a80 905
a6172a80
CW
906 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
907 for (i = 0; i < dev_priv->num_fence_regs; i++) {
49ef5294 908 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
a6172a80 909
6c085a72
CW
910 seq_printf(m, "Fence %d, pin count = %d, object = ",
911 i, dev_priv->fence_regs[i].pin_count);
49ef5294 912 if (!vma)
267f0c90 913 seq_puts(m, "unused");
c2c347a9 914 else
49ef5294 915 describe_obj(m, vma->obj);
267f0c90 916 seq_putc(m, '\n');
a6172a80
CW
917 }
918
05394f39 919 mutex_unlock(&dev->struct_mutex);
a6172a80
CW
920 return 0;
921}
922
98a2f411 923#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
5a4c6f1b
CW
/*
 * gpu_state_read - read() for the error/gpu-info debugfs files.
 *
 * Serialises the captured GPU state into a kernel bounce buffer and copies
 * it to userspace, advancing *pos by the number of bytes produced.
 *
 * Returns the number of bytes copied, 0 at EOF or when no state was
 * captured, or a negative errno (-ENOMEM, -EFAULT, or whatever
 * i915_gpu_state_copy_to_buffer() reports).
 */
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_state *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	/*
	 * NOTE(review): count is caller-controlled; large reads allocate a
	 * correspondingly large kernel buffer here — confirm the upper bound
	 * imposed by the VFS is acceptable.
	 */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	/* Only advance the file position for the bytes actually delivered. */
	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}
edc3d884 953
5a4c6f1b
CW
/*
 * gpu_state_release - release() for the error/gpu-info debugfs files.
 *
 * Drops the reference on the captured GPU state taken at open time.
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
959
/*
 * i915_gpu_info_open - open() for the i915_gpu_info debugfs file.
 *
 * Captures a fresh snapshot of the GPU state (holding a runtime-PM wakeref
 * for the duration of the capture) and stashes it in file->private_data
 * for gpu_state_read()/gpu_state_release().
 *
 * Returns 0 on success or the PTR_ERR from a failed capture.
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(i915, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}
975
5a4c6f1b
CW
/* File operations for the read-only i915_gpu_info debugfs file. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
983
/*
 * i915_error_state_write - write() for the i915_error_state debugfs file.
 *
 * Any write clears the stored error state for the device the open state
 * was captured from; the written bytes themselves are ignored.
 *
 * Returns cnt (all input consumed), or 0 when no error state was open.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
edc3d884 1000
5a4c6f1b
CW
/*
 * i915_error_state_open - open() for the i915_error_state debugfs file.
 *
 * Takes a reference on the first (oldest) recorded error state, rather
 * than capturing a new snapshot as i915_gpu_info_open() does, and stores
 * it in file->private_data.
 *
 * Returns 0 on success or the PTR_ERR from i915_first_error_state().
 */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_state *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}
1012
/*
 * File operations for i915_error_state: readable like i915_gpu_info,
 * plus a write hook that clears the stored error state.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
98a2f411
CW
1021#endif
1022
/*
 * i915_frequency_info - debugfs dump of GPU frequency/RPS state.
 *
 * Three generation-specific paths: ILK (gen5) MEMSWCTL/MEMSTAT decode,
 * VLV/CHV punit-based readout, and the gen6+ RPS register dump; older
 * platforms report "no P-state info available".  Ends with the common
 * CD-clock / dot-clock limits.  Holds a runtime-PM wakeref throughout.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* pcu_lock serialises the punit sideband access below. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class LP parts keep these caps in BXT_* registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* Requested frequency field moved between generations. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		/* ISR/IIR were zeroed, not read, on gen11+ — see above. */
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN/RP1/RP0 fields swap position on GEN9 LP parts. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1245
d636951e
BW
/*
 * i915_instdone_info - pretty-print a sampled intel_instdone snapshot.
 *
 * Emits INSTDONE always, SC_INSTDONE from gen4 up, and the per-slice/
 * subslice sampler and row INSTDONE arrays from gen7 up.  Pure formatting;
 * reads no hardware itself.
 */
static void i915_instdone_info(struct drm_i915_private *dev_priv,
			       struct seq_file *m,
			       struct intel_instdone *instdone)
{
	int slice;
	int subslice;

	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
		   instdone->instdone);

	/* Gen2/3 only have the single INSTDONE register. */
	if (INTEL_GEN(dev_priv) <= 3)
		return;

	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
		   instdone->slice_common);

	/* Per-slice/subslice breakdown only exists from gen7 onwards. */
	if (INTEL_GEN(dev_priv) <= 6)
		return;

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->sampler[slice][subslice]);

	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
			   slice, subslice, instdone->row[slice][subslice]);
}
1273
f654449a
CW
/*
 * i915_hangcheck_info - debugfs summary of the hangcheck machinery.
 *
 * Reports global wedge/reset flags, whether the hangcheck worker is armed,
 * and per-engine progress: the hangcheck-sampled seqno/ACTHD versus a live
 * sample taken here, plus INSTDONE detail for the render engine.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample live engine state under a runtime-PM wakeref. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_seqno(engine);
		}

		intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine),
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		/* INSTDONE detail is only captured for the render engine. */
		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1344
061d06a2
MT
1345static int i915_reset_info(struct seq_file *m, void *unused)
1346{
1347 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1348 struct i915_gpu_error *error = &dev_priv->gpu_error;
1349 struct intel_engine_cs *engine;
1350 enum intel_engine_id id;
1351
1352 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1353
1354 for_each_engine(engine, dev_priv, id) {
1355 seq_printf(m, "%s = %u\n", engine->name,
1356 i915_reset_engine_count(error, engine));
1357 }
1358
1359 return 0;
1360}
1361
/*
 * ironlake_drpc_info - gen5 render-standby (DRPC) state dump.
 *
 * Decodes MEMMODECTL, RSTDBYCTL and CRSTANDVID into human-readable lines,
 * finishing with the current RSX_STATUS render-standby state.
 * Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1418
f65367b5 1419static int i915_forcewake_domains(struct seq_file *m, void *data)
669ab5aa 1420{
233ebf57 1421 struct drm_i915_private *i915 = node_to_i915(m->private);
b2cff0db 1422 struct intel_uncore_forcewake_domain *fw_domain;
d2dc94bc 1423 unsigned int tmp;
b2cff0db 1424
d7a133d8
CW
1425 seq_printf(m, "user.bypass_count = %u\n",
1426 i915->uncore.user_forcewake.count);
1427
233ebf57 1428 for_each_fw_domain(fw_domain, i915, tmp)
b2cff0db 1429 seq_printf(m, "%s.wake_count = %u\n",
33c582c1 1430 intel_uncore_forcewake_domain_to_str(fw_domain->id),
233ebf57 1431 READ_ONCE(fw_domain->wake_count));
669ab5aa 1432
b2cff0db
CW
1433 return 0;
1434}
1435
1362877e
MK
/*
 * print_rc6_res - print one RC6 residency counter.
 *
 * Emits the raw register value followed by its conversion to microseconds
 * via intel_rc6_residency_us().
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1446
b2cff0db
CW
/*
 * vlv_drpc_info - VLV/CHV RC6 and power-well state dump.
 *
 * Decodes GEN6_RC_CONTROL and VLV_GTLC_PW_STATUS, prints the render/media
 * RC6 residency counters, then tails into the forcewake domain dump.
 * Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1468
4d85529d
BW
/*
 * gen6_drpc_info - gen6+ RC-state (DRPC) dump.
 *
 * Reads GEN6_GT_CORE_STATUS and GEN6_RC_CONTROL, the gen9+ power-gating
 * registers where present, and on gen6/7 the RC6 VID values via the pcode
 * mailbox.  Prints the decoded enable bits, current RC state, residency
 * counters, and tails into the forcewake domain dump.
 * Caller (i915_drpc_info) holds the runtime-PM wakeref.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _FW read bypasses forcewake; trace it manually to keep the log. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6 voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1556
1557static int i915_drpc_info(struct seq_file *m, void *unused)
1558{
36cdd013 1559 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 1560 intel_wakeref_t wakeref;
d4225a53 1561 int err = -ENODEV;
cf632bd6 1562
d4225a53
CW
1563 with_intel_runtime_pm(dev_priv, wakeref) {
1564 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1565 err = vlv_drpc_info(m);
1566 else if (INTEL_GEN(dev_priv) >= 6)
1567 err = gen6_drpc_info(m);
1568 else
1569 err = ironlake_drpc_info(m);
1570 }
cf632bd6
CW
1571
1572 return err;
4d85529d
BW
1573}
1574
9a851789
SV
1575static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1576{
36cdd013 1577 struct drm_i915_private *dev_priv = node_to_i915(m->private);
9a851789
SV
1578
1579 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1580 dev_priv->fb_tracking.busy_bits);
1581
1582 seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1583 dev_priv->fb_tracking.flip_bits);
1584
1585 return 0;
1586}
1587
b5e50c3f
JB
1588static int i915_fbc_status(struct seq_file *m, void *unused)
1589{
36cdd013 1590 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3138872c 1591 struct intel_fbc *fbc = &dev_priv->fbc;
a037121c 1592 intel_wakeref_t wakeref;
b5e50c3f 1593
ab309a6a
MW
1594 if (!HAS_FBC(dev_priv))
1595 return -ENODEV;
b5e50c3f 1596
a037121c 1597 wakeref = intel_runtime_pm_get(dev_priv);
3138872c 1598 mutex_lock(&fbc->lock);
36623ef8 1599
0e631adc 1600 if (intel_fbc_is_active(dev_priv))
267f0c90 1601 seq_puts(m, "FBC enabled\n");
2e8144a5 1602 else
3138872c
CW
1603 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1604
3fd5d1ec
VS
1605 if (intel_fbc_is_active(dev_priv)) {
1606 u32 mask;
1607
1608 if (INTEL_GEN(dev_priv) >= 8)
1609 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1610 else if (INTEL_GEN(dev_priv) >= 7)
1611 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1612 else if (INTEL_GEN(dev_priv) >= 5)
1613 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1614 else if (IS_G4X(dev_priv))
1615 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1616 else
1617 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1618 FBC_STAT_COMPRESSED);
1619
1620 seq_printf(m, "Compressing: %s\n", yesno(mask));
0fc6a9dc 1621 }
31b9df10 1622
3138872c 1623 mutex_unlock(&fbc->lock);
a037121c 1624 intel_runtime_pm_put(dev_priv, wakeref);
36623ef8 1625
b5e50c3f
JB
1626 return 0;
1627}
1628
4127dc43 1629static int i915_fbc_false_color_get(void *data, u64 *val)
da46f936 1630{
36cdd013 1631 struct drm_i915_private *dev_priv = data;
da46f936 1632
36cdd013 1633 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
da46f936
RV
1634 return -ENODEV;
1635
da46f936 1636 *val = dev_priv->fbc.false_color;
da46f936
RV
1637
1638 return 0;
1639}
1640
/*
 * i915_fbc_false_color_set - write hook for the FBC false-color attribute.
 *
 * Caches the requested value and sets/clears FBC_CTL_FALSE_COLOR in
 * ILK_DPFC_CONTROL under fbc.lock.  -ENODEV where the knob does not
 * exist (pre-gen7 or no FBC).
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	/* Read-modify-write: only the false-color bit is touched. */
	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1661
4127dc43
VS
/* debugfs attribute wiring the false-color get/set helpers above together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1665
92d44621
PZ
/*
 * i915_ips_status - debugfs dump of Intermediate Pixel Storage state.
 *
 * Prints whether IPS is allowed by the module parameter and, where the
 * IPS_CTL register is readable (pre-gen8), whether it is currently on.
 * Gen8+ reports "unknown" rather than reading the register.
 *
 * Returns 0, or -ENODEV on hardware without IPS.
 */
static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
1692
4a9bef37
JB
/*
 * i915_sr_status - debugfs report of panel self-refresh state.
 *
 * Reads the platform-specific self-refresh enable bit and prints
 * enabled/disabled.  Gen9+ deliberately reports nothing (no global SR
 * bit; state is per-plane watermark driven).  Holds the INIT power domain
 * while touching the registers.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1721
7648fa99
JB
1722static int i915_emon_status(struct seq_file *m, void *unused)
1723{
4a8ab5ea 1724 struct drm_i915_private *i915 = node_to_i915(m->private);
a037121c 1725 intel_wakeref_t wakeref;
de227ef0 1726
4a8ab5ea 1727 if (!IS_GEN(i915, 5))
582be6b4
CW
1728 return -ENODEV;
1729
4a8ab5ea
CW
1730 with_intel_runtime_pm(i915, wakeref) {
1731 unsigned long temp, chipset, gfx;
7648fa99 1732
4a8ab5ea
CW
1733 temp = i915_mch_val(i915);
1734 chipset = i915_chipset_val(i915);
1735 gfx = i915_gfx_val(i915);
7648fa99 1736
4a8ab5ea
CW
1737 seq_printf(m, "GMCH temp: %ld\n", temp);
1738 seq_printf(m, "Chipset power: %ld\n", chipset);
1739 seq_printf(m, "GFX power: %ld\n", gfx);
1740 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1741 }
7648fa99
JB
1742
1743 return 0;
1744}
1745
23b2f8bb
JB
/*
 * i915_ring_freq_table - debugfs table of GPU vs effective CPU/ring freq.
 *
 * For each GPU frequency step between the RPS min and max, queries the
 * pcode mailbox for the matching CPU/ring frequencies and prints one row.
 * Requires LLC (the table only exists on LLC platforms); holds a
 * runtime-PM wakeref and pcu_lock for the pcode reads.
 *
 * Returns 0, -ENODEV without LLC, or the error from an interrupted wait
 * on pcu_lock.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/* CPU/ring fields come back as multiples of 100 MHz. */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1794
44834a67
CW
1795static int i915_opregion(struct seq_file *m, void *unused)
1796{
36cdd013
DW
1797 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1798 struct drm_device *dev = &dev_priv->drm;
44834a67
CW
1799 struct intel_opregion *opregion = &dev_priv->opregion;
1800 int ret;
1801
1802 ret = mutex_lock_interruptible(&dev->struct_mutex);
1803 if (ret)
0d38f009 1804 goto out;
44834a67 1805
2455a8e4
JN
1806 if (opregion->header)
1807 seq_write(m, opregion->header, OPREGION_SIZE);
44834a67
CW
1808
1809 mutex_unlock(&dev->struct_mutex);
1810
0d38f009 1811out:
44834a67
CW
1812 return 0;
1813}
1814
ada8f955
JN
1815static int i915_vbt(struct seq_file *m, void *unused)
1816{
36cdd013 1817 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
ada8f955
JN
1818
1819 if (opregion->vbt)
1820 seq_write(m, opregion->vbt, opregion->vbt_size);
1821
1822 return 0;
1823}
1824
37811fcc
CW
/*
 * i915_gem_framebuffer_info - list all framebuffers and their GEM objects.
 *
 * Prints the fbdev/fbcon framebuffer first (when fbdev emulation is
 * compiled in), then every user-created framebuffer, skipping the fbdev
 * one so it is not reported twice.
 *
 * Lock order: struct_mutex (for describe_obj) outside
 * mode_config.fb_lock (protects the fb list).
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* Already printed above as the fbcon framebuffer. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1874
/* Print a one-line summary of a context ringbuffer's occupancy pointers. */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1880
e76d3630
BW
/*
 * i915_context_status - list every GEM context and its per-engine state.
 *
 * For each context: its HW id (if pinned), the owning task (or whether it
 * is a deleted/kernel context), the remap-slice flag, and for every engine
 * the logical ring context object and ringbuffer, via describe_obj() /
 * describe_ctx_ring().
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	/* struct_mutex stabilises the context list and object state. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* Takes a task reference; dropped just below. */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' when L3 slice remapping is pending, 'r' otherwise. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1936
ea16a3cd
SV
1937static const char *swizzle_string(unsigned swizzle)
1938{
aee56cff 1939 switch (swizzle) {
ea16a3cd
SV
1940 case I915_BIT_6_SWIZZLE_NONE:
1941 return "none";
1942 case I915_BIT_6_SWIZZLE_9:
1943 return "bit9";
1944 case I915_BIT_6_SWIZZLE_9_10:
1945 return "bit9/bit10";
1946 case I915_BIT_6_SWIZZLE_9_11:
1947 return "bit9/bit11";
1948 case I915_BIT_6_SWIZZLE_9_10_11:
1949 return "bit9/bit10/bit11";
1950 case I915_BIT_6_SWIZZLE_9_17:
1951 return "bit9/bit17";
1952 case I915_BIT_6_SWIZZLE_9_10_17:
1953 return "bit9/bit10/bit17";
1954 case I915_BIT_6_SWIZZLE_UNKNOWN:
8a168ca7 1955 return "unknown";
ea16a3cd
SV
1956 }
1957
1958 return "bug";
1959}
1960
/*
 * i915_swizzle_info - report the detected bit-6 swizzle configuration.
 *
 * Prints the X/Y tiling swizzle modes and the raw memory-controller
 * registers they were derived from (generation dependent), plus whether
 * the L-shaped-memory quirk is active.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	/* Registers below require the device to be awake. */
	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* gen3/4: DRAM configuration lives in DCC/DRB registers. */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: per-channel DIMM config plus arbiter mode. */
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2008
7466c291
CW
2009static const char *rps_power_to_str(unsigned int power)
2010{
2011 static const char * const strings[] = {
2012 [LOW_POWER] = "low power",
2013 [BETWEEN] = "mixed",
2014 [HIGH_POWER] = "high power",
2015 };
2016
2017 if (power >= ARRAY_SIZE(strings) || !strings[power])
2018 return "unknown";
2019
2020 return strings[power];
2021}
2022
1854d5ca
CW
/*
 * i915_rps_boost_info - report RPS (GPU frequency scaling) boost state.
 *
 * Dumps requested vs actual frequency, the soft/hard frequency limits,
 * per-client and kernel boost counts, and — when RPS is active — the
 * up/down autotuning counters sampled from hardware.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;
	struct drm_file *file;

	/* Only read the true hardware frequency if the device is already
	 * awake; otherwise keep the cached cur_freq so we do not wake it. */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			/* Punit packs the current freq in bits 8-15. */
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-client boost statistics; filelist_mutex guards the list,
	 * RCU keeps the task alive while we print its name/pid. */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw (I915_READ_FW) reads under an explicit forcewake to
		 * sample all four counters from one evaluation window. */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2108
63573eb7
BW
2109static int i915_llc(struct seq_file *m, void *data)
2110{
36cdd013 2111 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3accaf7e 2112 const bool edram = INTEL_GEN(dev_priv) > 8;
63573eb7 2113
36cdd013 2114 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
3accaf7e
MK
2115 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2116 intel_uncore_edram_size(dev_priv)/1024/1024);
63573eb7
BW
2117
2118 return 0;
2119}
2120
0509ead1
AS
/*
 * i915_huc_load_status_info - dump HuC firmware state and status register.
 *
 * Returns -ENODEV on hardware without a HuC; the HUC_STATUS2 read needs
 * the device awake, hence the scoped runtime-pm block.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2138
fdf5d357
AD
/*
 * i915_guc_load_status_info - dump GuC firmware state and status registers.
 *
 * Prints the uC firmware descriptor, the decoded GUC_STATUS fields
 * (bootrom / uKernel / MIA core) and the 16 soft scratch registers.
 * Returns -ENODEV on hardware without a GuC.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	/* Register reads require the device to be awake. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2171
5e24e4a2
MW
2172static const char *
2173stringify_guc_log_type(enum guc_log_buffer_type type)
2174{
2175 switch (type) {
2176 case GUC_ISR_LOG_BUFFER:
2177 return "ISR";
2178 case GUC_DPC_LOG_BUFFER:
2179 return "DPC";
2180 case GUC_CRASH_DUMP_LOG_BUFFER:
2181 return "CRASH";
2182 default:
2183 MISSING_CASE(type);
2184 }
2185
2186 return "";
2187}
2188
5aa1ee4b
AG
/*
 * i915_guc_log_info - print GuC log relay statistics.
 *
 * Helper for i915_guc_info(); reports flush/overflow counters per log
 * buffer type, or a single line if the relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2212
8b417c26
DG
2213static void i915_guc_client_info(struct seq_file *m,
2214 struct drm_i915_private *dev_priv,
5afc8b49 2215 struct intel_guc_client *client)
8b417c26 2216{
e2f80391 2217 struct intel_engine_cs *engine;
c18468c4 2218 enum intel_engine_id id;
e5315213 2219 u64 tot = 0;
8b417c26 2220
b09935a6
OM
2221 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2222 client->priority, client->stage_id, client->proc_desc_offset);
59db36cf
MW
2223 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2224 client->doorbell_id, client->doorbell_offset);
8b417c26 2225
3b3f1650 2226 for_each_engine(engine, dev_priv, id) {
c18468c4
DG
2227 u64 submissions = client->submissions[id];
2228 tot += submissions;
8b417c26 2229 seq_printf(m, "\tSubmissions: %llu %s\n",
c18468c4 2230 submissions, engine->name);
8b417c26
DG
2231 }
2232 seq_printf(m, "\tTotal: %llu\n", tot);
2233}
2234
a8b9370f
OM
/*
 * i915_guc_info - top-level GuC state dump.
 *
 * Always prints log-relay stats; when GuC submission is in use, also
 * dumps the doorbell map and the execbuf (and optional preempt) clients.
 * Returns -ENODEV if the GuC is not used at all.
 */
static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	i915_guc_log_info(m, dev_priv);

	/* Log-only configurations have no submission clients to report. */
	if (!USES_GUC_SUBMISSION(dev_priv))
		return 0;

	/* Submission enabled implies the execbuf client exists. */
	GEM_BUG_ON(!guc->execbuf_client);

	seq_printf(m, "\nDoorbell map:\n");
	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);

	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
	if (guc->preempt_client) {
		seq_printf(m, "\nGuC preempt client @ %p:\n",
			   guc->preempt_client);
		i915_guc_client_info(m, dev_priv, guc->preempt_client);
	}

	/* Add more as required ... */

	return 0;
}
2266
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks the shared stage-descriptor pool, printing the descriptor
 * fields, doorbell/workqueue placement, and the per-engine logical ring
 * context entries for engines the execbuf client uses.
 * Returns -ENODEV unless GuC submission is enabled.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip slots not currently owned by a client. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
				&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2320
4c7e77fc
AD
2321static int i915_guc_log_dump(struct seq_file *m, void *data)
2322{
ac58d2ab
DCS
2323 struct drm_info_node *node = m->private;
2324 struct drm_i915_private *dev_priv = node_to_i915(node);
2325 bool dump_load_err = !!node->info_ent->data;
2326 struct drm_i915_gem_object *obj = NULL;
2327 u32 *log;
2328 int i = 0;
4c7e77fc 2329
ab309a6a
MW
2330 if (!HAS_GUC(dev_priv))
2331 return -ENODEV;
2332
ac58d2ab
DCS
2333 if (dump_load_err)
2334 obj = dev_priv->guc.load_err_log;
2335 else if (dev_priv->guc.log.vma)
2336 obj = dev_priv->guc.log.vma->obj;
4c7e77fc 2337
ac58d2ab
DCS
2338 if (!obj)
2339 return 0;
4c7e77fc 2340
ac58d2ab
DCS
2341 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2342 if (IS_ERR(log)) {
2343 DRM_DEBUG("Failed to pin object\n");
2344 seq_puts(m, "(log data unaccessible)\n");
2345 return PTR_ERR(log);
4c7e77fc
AD
2346 }
2347
ac58d2ab
DCS
2348 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2349 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2350 *(log + i), *(log + i + 1),
2351 *(log + i + 2), *(log + i + 3));
2352
4c7e77fc
AD
2353 seq_putc(m, '\n');
2354
ac58d2ab
DCS
2355 i915_gem_object_unpin_map(obj);
2356
4c7e77fc
AD
2357 return 0;
2358}
2359
4977a287 2360static int i915_guc_log_level_get(void *data, u64 *val)
685534ef 2361{
bcc36d8a 2362 struct drm_i915_private *dev_priv = data;
685534ef 2363
86aa8247 2364 if (!USES_GUC(dev_priv))
ab309a6a
MW
2365 return -ENODEV;
2366
50935ac7 2367 *val = intel_guc_log_get_level(&dev_priv->guc.log);
685534ef
SAK
2368
2369 return 0;
2370}
2371
4977a287 2372static int i915_guc_log_level_set(void *data, u64 val)
685534ef 2373{
bcc36d8a 2374 struct drm_i915_private *dev_priv = data;
685534ef 2375
86aa8247 2376 if (!USES_GUC(dev_priv))
ab309a6a
MW
2377 return -ENODEV;
2378
50935ac7 2379 return intel_guc_log_set_level(&dev_priv->guc.log, val);
685534ef
SAK
2380}
2381
4977a287
MW
2382DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2383 i915_guc_log_level_get, i915_guc_log_level_set,
685534ef
SAK
2384 "%lld\n");
2385
4977a287
MW
/*
 * Open the GuC log relay: stashes the log pointer for later file ops and
 * starts the relay channel. -ENODEV without GuC.
 */
static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	file->private_data = &dev_priv->guc.log;

	return intel_guc_log_relay_open(&dev_priv->guc.log);
}

/*
 * Any write to the relay file forces a flush of buffered GuC log data;
 * the written bytes themselves are ignored.
 */
static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;

	intel_guc_log_relay_flush(log);

	return cnt;
}

/* Tear the relay channel down on the last close. */
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2426
5b7b3086
DP
/*
 * i915_psr_sink_status_show - decode the sink-side PSR state over DPCD.
 *
 * Reads DP_PSR_STATUS from the panel via AUX and prints the state name.
 * Returns -ENODEV when PSR is unsupported or the connector is not
 * connected, or the AUX error code on a failed read.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	u8 val;
	/* Indexed by DP_PSR_SINK_STATE_MASK values from the DP spec. */
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	struct drm_connector *connector = m->private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	int ret;

	if (!CAN_PSR(dev_priv)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

	if (ret == 1) {
		/* drm_dp_dpcd_readb returns the byte count on success. */
		const char *str = "unknown";

		val &= DP_PSR_SINK_STATE_MASK;
		if (val < ARRAY_SIZE(sink_status))
			str = sink_status[val];
		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		return ret;
	}

	return 0;
}
2469DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2470
00b06296
VN
/*
 * psr_source_status - print the source-side PSR hardware state machine.
 *
 * Decodes the live-status field from EDP_PSR2_STATUS or EDP_PSR_STATUS
 * depending on which PSR mode is active; unknown encodings print
 * "unknown" with the raw register value.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2516
e91fd8c6
RV
2517static int i915_edp_psr_status(struct seq_file *m, void *data)
2518{
36cdd013 2519 struct drm_i915_private *dev_priv = node_to_i915(m->private);
47c6cd54 2520 struct i915_psr *psr = &dev_priv->psr;
a037121c 2521 intel_wakeref_t wakeref;
47c6cd54
JRS
2522 const char *status;
2523 bool enabled;
2524 u32 val;
e91fd8c6 2525
ab309a6a
MW
2526 if (!HAS_PSR(dev_priv))
2527 return -ENODEV;
3553a8ea 2528
47c6cd54
JRS
2529 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2530 if (psr->dp)
2531 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2532 seq_puts(m, "\n");
2533
2534 if (!psr->sink_support)
c9ef291a
DP
2535 return 0;
2536
a037121c 2537 wakeref = intel_runtime_pm_get(dev_priv);
47c6cd54 2538 mutex_lock(&psr->lock);
c8c8fb33 2539
47c6cd54
JRS
2540 if (psr->enabled)
2541 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
ce3508fd 2542 else
47c6cd54
JRS
2543 status = "disabled";
2544 seq_printf(m, "PSR mode: %s\n", status);
60e5ffe3 2545
47c6cd54
JRS
2546 if (!psr->enabled)
2547 goto unlock;
60e5ffe3 2548
47c6cd54
JRS
2549 if (psr->psr2_enabled) {
2550 val = I915_READ(EDP_PSR2_CTL);
2551 enabled = val & EDP_PSR2_ENABLE;
2552 } else {
2553 val = I915_READ(EDP_PSR_CTL);
2554 enabled = val & EDP_PSR_ENABLE;
2555 }
2556 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2557 enableddisabled(enabled), val);
2558 psr_source_status(dev_priv, m);
2559 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2560 psr->busy_frontbuffer_bits);
e91fd8c6 2561
05eec3c2 2562 /*
05eec3c2
RV
2563 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2564 */
36cdd013 2565 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
47c6cd54
JRS
2566 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2567 seq_printf(m, "Performance counter: %u\n", val);
a6cbdb8e 2568 }
b86bef20 2569
47c6cd54 2570 if (psr->debug & I915_PSR_DEBUG_IRQ) {
3f983e54 2571 seq_printf(m, "Last attempted entry at: %lld\n",
47c6cd54
JRS
2572 psr->last_entry_attempt);
2573 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
3f983e54
DP
2574 }
2575
a81f781a
JRS
2576 if (psr->psr2_enabled) {
2577 u32 su_frames_val[3];
2578 int frame;
2579
2580 /*
2581 * Reading all 3 registers before hand to minimize crossing a
2582 * frame boundary between register reads
2583 */
2584 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2585 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2586
2587 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2588
2589 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2590 u32 su_blocks;
2591
2592 su_blocks = su_frames_val[frame / 3] &
2593 PSR2_SU_STATUS_MASK(frame);
2594 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2595 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2596 }
2597 }
2598
47c6cd54
JRS
2599unlock:
2600 mutex_unlock(&psr->lock);
a037121c 2601 intel_runtime_pm_put(dev_priv, wakeref);
47c6cd54 2602
e91fd8c6
RV
2603 return 0;
2604}
2605
54fd3149
DP
/*
 * i915_edp_psr_debug_set - change the PSR debug mode via debugfs.
 *
 * Switching the mode requires modeset locks; on a -EDEADLK from the
 * acquire context we must always drop the held locks via
 * drm_modeset_backoff() before retrying (lock-acquisition protocol).
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	intel_wakeref_t wakeref;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	wakeref = intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Backoff releases contended locks; retry unless the wait
		 * itself was interrupted. */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv, wakeref);

	return ret;
}
2638
2639static int
2640i915_edp_psr_debug_get(void *data, u64 *val)
2641{
2642 struct drm_i915_private *dev_priv = data;
2643
2644 if (!CAN_PSR(dev_priv))
2645 return -ENODEV;
2646
2647 *val = READ_ONCE(dev_priv->psr.debug);
2648 return 0;
2649}
2650
2651DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2652 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2653 "%llu\n");
2654
ec013e7f
JB
2655static int i915_energy_uJ(struct seq_file *m, void *data)
2656{
36cdd013 2657 struct drm_i915_private *dev_priv = node_to_i915(m->private);
d38014ea 2658 unsigned long long power;
a037121c 2659 intel_wakeref_t wakeref;
ec013e7f
JB
2660 u32 units;
2661
36cdd013 2662 if (INTEL_GEN(dev_priv) < 6)
ec013e7f
JB
2663 return -ENODEV;
2664
d4225a53 2665 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
d38014ea 2666 return -ENODEV;
d38014ea
GKB
2667
2668 units = (power & 0x1f00) >> 8;
d4225a53
CW
2669 with_intel_runtime_pm(dev_priv, wakeref)
2670 power = I915_READ(MCH_SECP_NRG_STTS);
36623ef8 2671
d4225a53 2672 power = (1000000 * power) >> units; /* convert to uJ */
d38014ea 2673 seq_printf(m, "%llu", power);
371db66a
PZ
2674
2675 return 0;
2676}
2677
/*
 * i915_runtime_pm_status - summarise the device's runtime-PM state.
 *
 * Reports wakeref-based power status, GT idleness, IRQ state, the PM
 * core usage count (when CONFIG_PM is set), the PCI power state, and —
 * with wakeref debugging enabled — the outstanding wakeref tracker.
 */
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s (epoch %u)\n",
		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(dev_priv, &p);
	}

	return 0;
}
2711
1da51581
ID
/*
 * i915_power_domain_info - dump power wells and their domain refcounts.
 *
 * For every power well prints its use count, then each display power
 * domain it feeds together with that domain's use count.  Holds the
 * power_domains lock to keep the counts consistent while printing.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int i;

	mutex_lock(&power_domains->lock);

	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
	for (i = 0; i < power_domains->power_well_count; i++) {
		struct i915_power_well *power_well;
		enum intel_display_power_domain power_domain;

		power_well = &power_domains->power_wells[i];
		seq_printf(m, "%-25s %d\n", power_well->desc->name,
			   power_well->count);

		for_each_power_domain(power_domain, power_well->desc->domains)
			seq_printf(m, "  %-23s %d\n",
				   intel_display_power_domain_str(power_domain),
				   power_domains->domain_use_count[power_domain]);
	}

	mutex_unlock(&power_domains->lock);

	return 0;
}
2739
b7cec66d
DL
2740static int i915_dmc_info(struct seq_file *m, void *unused)
2741{
36cdd013 2742 struct drm_i915_private *dev_priv = node_to_i915(m->private);
a037121c 2743 intel_wakeref_t wakeref;
b7cec66d
DL
2744 struct intel_csr *csr;
2745
ab309a6a
MW
2746 if (!HAS_CSR(dev_priv))
2747 return -ENODEV;
b7cec66d
DL
2748
2749 csr = &dev_priv->csr;
2750
a037121c 2751 wakeref = intel_runtime_pm_get(dev_priv);
6fb403de 2752
b7cec66d
DL
2753 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2754 seq_printf(m, "path: %s\n", csr->fw_path);
2755
2756 if (!csr->dmc_payload)
6fb403de 2757 goto out;
b7cec66d
DL
2758
2759 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2760 CSR_VERSION_MINOR(csr->version));
2761
34b2f8da
ID
2762 if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2763 goto out;
2764
2765 seq_printf(m, "DC3 -> DC5 count: %d\n",
2766 I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2767 SKL_CSR_DC3_DC5_COUNT));
2768 if (!IS_GEN9_LP(dev_priv))
8337206d
DL
2769 seq_printf(m, "DC5 -> DC6 count: %d\n",
2770 I915_READ(SKL_CSR_DC5_DC6_COUNT));
8337206d 2771
6fb403de
MK
2772out:
2773 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2774 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2775 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2776
a037121c 2777 intel_runtime_pm_put(dev_priv, wakeref);
8337206d 2778
b7cec66d
DL
2779 return 0;
2780}
2781
53f5e3ca
JB
2782static void intel_seq_print_mode(struct seq_file *m, int tabs,
2783 struct drm_display_mode *mode)
2784{
2785 int i;
2786
2787 for (i = 0; i < tabs; i++)
2788 seq_putc(m, '\t');
2789
4fb6bb89 2790 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
53f5e3ca
JB
2791}
2792
2793static void intel_encoder_info(struct seq_file *m,
2794 struct intel_crtc *intel_crtc,
2795 struct intel_encoder *intel_encoder)
2796{
36cdd013
DW
2797 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2798 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2799 struct drm_crtc *crtc = &intel_crtc->base;
2800 struct intel_connector *intel_connector;
2801 struct drm_encoder *encoder;
2802
2803 encoder = &intel_encoder->base;
2804 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
8e329a03 2805 encoder->base.id, encoder->name);
53f5e3ca
JB
2806 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2807 struct drm_connector *connector = &intel_connector->base;
2808 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2809 connector->base.id,
c23cc417 2810 connector->name,
53f5e3ca
JB
2811 drm_get_connector_status_name(connector->status));
2812 if (connector->status == connector_status_connected) {
2813 struct drm_display_mode *mode = &crtc->mode;
2814 seq_printf(m, ", mode:\n");
2815 intel_seq_print_mode(m, 2, mode);
2816 } else {
2817 seq_putc(m, '\n');
2818 }
2819 }
2820}
2821
2822static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2823{
36cdd013
DW
2824 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2825 struct drm_device *dev = &dev_priv->drm;
53f5e3ca
JB
2826 struct drm_crtc *crtc = &intel_crtc->base;
2827 struct intel_encoder *intel_encoder;
23a48d53
ML
2828 struct drm_plane_state *plane_state = crtc->primary->state;
2829 struct drm_framebuffer *fb = plane_state->fb;
53f5e3ca 2830
23a48d53 2831 if (fb)
5aa8a937 2832 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
23a48d53
ML
2833 fb->base.id, plane_state->src_x >> 16,
2834 plane_state->src_y >> 16, fb->width, fb->height);
5aa8a937
MR
2835 else
2836 seq_puts(m, "\tprimary plane disabled\n");
53f5e3ca
JB
2837 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2838 intel_encoder_info(m, intel_crtc, intel_encoder);
2839}
2840
2841static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2842{
2843 struct drm_display_mode *mode = panel->fixed_mode;
2844
2845 seq_printf(m, "\tfixed mode:\n");
2846 intel_seq_print_mode(m, 2, mode);
2847}
2848
2849static void intel_dp_info(struct seq_file *m,
2850 struct intel_connector *intel_connector)
2851{
2852 struct intel_encoder *intel_encoder = intel_connector->encoder;
2853 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2854
2855 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
742f491d 2856 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
b6dabe3b 2857 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
53f5e3ca 2858 intel_panel_info(m, &intel_connector->panel);
80209e5f
MK
2859
2860 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2861 &intel_dp->aux);
53f5e3ca
JB
2862}
2863
9a148a96
LY
2864static void intel_dp_mst_info(struct seq_file *m,
2865 struct intel_connector *intel_connector)
2866{
2867 struct intel_encoder *intel_encoder = intel_connector->encoder;
2868 struct intel_dp_mst_encoder *intel_mst =
2869 enc_to_mst(&intel_encoder->base);
2870 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2871 struct intel_dp *intel_dp = &intel_dig_port->dp;
2872 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2873 intel_connector->port);
2874
2875 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2876}
2877
53f5e3ca
JB
2878static void intel_hdmi_info(struct seq_file *m,
2879 struct intel_connector *intel_connector)
2880{
2881 struct intel_encoder *intel_encoder = intel_connector->encoder;
2882 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2883
742f491d 2884 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
53f5e3ca
JB
2885}
2886
2887static void intel_lvds_info(struct seq_file *m,
2888 struct intel_connector *intel_connector)
2889{
2890 intel_panel_info(m, &intel_connector->panel);
2891}
2892
2893static void intel_connector_info(struct seq_file *m,
2894 struct drm_connector *connector)
2895{
2896 struct intel_connector *intel_connector = to_intel_connector(connector);
2897 struct intel_encoder *intel_encoder = intel_connector->encoder;
f103fc7d 2898 struct drm_display_mode *mode;
53f5e3ca
JB
2899
2900 seq_printf(m, "connector %d: type %s, status: %s\n",
c23cc417 2901 connector->base.id, connector->name,
53f5e3ca 2902 drm_get_connector_status_name(connector->status));
3e037f9b
JRS
2903
2904 if (connector->status == connector_status_disconnected)
2905 return;
2906
2907 seq_printf(m, "\tname: %s\n", connector->display_info.name);
2908 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2909 connector->display_info.width_mm,
2910 connector->display_info.height_mm);
2911 seq_printf(m, "\tsubpixel order: %s\n",
2912 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2913 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
ee648a74 2914
77d1f615 2915 if (!intel_encoder)
ee648a74
ML
2916 return;
2917
2918 switch (connector->connector_type) {
2919 case DRM_MODE_CONNECTOR_DisplayPort:
2920 case DRM_MODE_CONNECTOR_eDP:
9a148a96
LY
2921 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2922 intel_dp_mst_info(m, intel_connector);
2923 else
2924 intel_dp_info(m, intel_connector);
ee648a74
ML
2925 break;
2926 case DRM_MODE_CONNECTOR_LVDS:
2927 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
36cd7444 2928 intel_lvds_info(m, intel_connector);
ee648a74
ML
2929 break;
2930 case DRM_MODE_CONNECTOR_HDMIA:
2931 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
7e732cac 2932 intel_encoder->type == INTEL_OUTPUT_DDI)
ee648a74
ML
2933 intel_hdmi_info(m, intel_connector);
2934 break;
2935 default:
2936 break;
36cd7444 2937 }
53f5e3ca 2938
f103fc7d
JB
2939 seq_printf(m, "\tmodes:\n");
2940 list_for_each_entry(mode, &connector->modes, head)
2941 intel_seq_print_mode(m, 2, mode);
53f5e3ca
JB
2942}
2943
3abc4e09
RF
2944static const char *plane_type(enum drm_plane_type type)
2945{
2946 switch (type) {
2947 case DRM_PLANE_TYPE_OVERLAY:
2948 return "OVL";
2949 case DRM_PLANE_TYPE_PRIMARY:
2950 return "PRI";
2951 case DRM_PLANE_TYPE_CURSOR:
2952 return "CUR";
2953 /*
2954 * Deliberately omitting default: to generate compiler warnings
2955 * when a new drm_plane_type gets added.
2956 */
2957 }
2958
2959 return "unknown";
2960}
2961
5852a15c 2962static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
3abc4e09 2963{
3abc4e09 2964 /*
c2c446ad 2965 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3abc4e09
RF
2966 * will print them all to visualize if the values are misused
2967 */
5852a15c 2968 snprintf(buf, bufsize,
3abc4e09 2969 "%s%s%s%s%s%s(0x%08x)",
c2c446ad
RF
2970 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2971 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2972 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2973 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2974 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2975 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3abc4e09 2976 rotation);
3abc4e09
RF
2977}
2978
2979static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2980{
36cdd013
DW
2981 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2982 struct drm_device *dev = &dev_priv->drm;
3abc4e09
RF
2983 struct intel_plane *intel_plane;
2984
2985 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2986 struct drm_plane_state *state;
2987 struct drm_plane *plane = &intel_plane->base;
b3c11ac2 2988 struct drm_format_name_buf format_name;
5852a15c 2989 char rot_str[48];
3abc4e09
RF
2990
2991 if (!plane->state) {
2992 seq_puts(m, "plane->state is NULL!\n");
2993 continue;
2994 }
2995
2996 state = plane->state;
2997
90844f00 2998 if (state->fb) {
438b74a5
VS
2999 drm_get_format_name(state->fb->format->format,
3000 &format_name);
90844f00 3001 } else {
b3c11ac2 3002 sprintf(format_name.str, "N/A");
90844f00
EE
3003 }
3004
5852a15c
JN
3005 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
3006
3abc4e09
RF
3007 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3008 plane->base.id,
3009 plane_type(intel_plane->base.type),
3010 state->crtc_x, state->crtc_y,
3011 state->crtc_w, state->crtc_h,
3012 (state->src_x >> 16),
3013 ((state->src_x & 0xffff) * 15625) >> 10,
3014 (state->src_y >> 16),
3015 ((state->src_y & 0xffff) * 15625) >> 10,
3016 (state->src_w >> 16),
3017 ((state->src_w & 0xffff) * 15625) >> 10,
3018 (state->src_h >> 16),
3019 ((state->src_h & 0xffff) * 15625) >> 10,
b3c11ac2 3020 format_name.str,
5852a15c 3021 rot_str);
3abc4e09
RF
3022 }
3023}
3024
3025static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3026{
3027 struct intel_crtc_state *pipe_config;
3028 int num_scalers = intel_crtc->num_scalers;
3029 int i;
3030
3031 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3032
3033 /* Not all platformas have a scaler */
3034 if (num_scalers) {
3035 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3036 num_scalers,
3037 pipe_config->scaler_state.scaler_users,
3038 pipe_config->scaler_state.scaler_id);
3039
58415918 3040 for (i = 0; i < num_scalers; i++) {
3abc4e09
RF
3041 struct intel_scaler *sc =
3042 &pipe_config->scaler_state.scalers[i];
3043
3044 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3045 i, yesno(sc->in_use), sc->mode);
3046 }
3047 seq_puts(m, "\n");
3048 } else {
3049 seq_puts(m, "\tNo scalers available on this platform\n");
3050 }
3051}
3052
53f5e3ca
JB
3053static int i915_display_info(struct seq_file *m, void *unused)
3054{
36cdd013
DW
3055 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3056 struct drm_device *dev = &dev_priv->drm;
065f2ec2 3057 struct intel_crtc *crtc;
53f5e3ca 3058 struct drm_connector *connector;
3f6a5e1e 3059 struct drm_connector_list_iter conn_iter;
a037121c
CW
3060 intel_wakeref_t wakeref;
3061
3062 wakeref = intel_runtime_pm_get(dev_priv);
53f5e3ca 3063
53f5e3ca
JB
3064 seq_printf(m, "CRTC info\n");
3065 seq_printf(m, "---------\n");
d3fcc808 3066 for_each_intel_crtc(dev, crtc) {
f77076c9 3067 struct intel_crtc_state *pipe_config;
53f5e3ca 3068
3f6a5e1e 3069 drm_modeset_lock(&crtc->base.mutex, NULL);
f77076c9
ML
3070 pipe_config = to_intel_crtc_state(crtc->base.state);
3071
3abc4e09 3072 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
065f2ec2 3073 crtc->base.base.id, pipe_name(crtc->pipe),
f77076c9 3074 yesno(pipe_config->base.active),
3abc4e09
RF
3075 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3076 yesno(pipe_config->dither), pipe_config->pipe_bpp);
3077
f77076c9 3078 if (pipe_config->base.active) {
cd5dcbf1
VS
3079 struct intel_plane *cursor =
3080 to_intel_plane(crtc->base.cursor);
3081
065f2ec2
CW
3082 intel_crtc_info(m, crtc);
3083
cd5dcbf1
VS
3084 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3085 yesno(cursor->base.state->visible),
3086 cursor->base.state->crtc_x,
3087 cursor->base.state->crtc_y,
3088 cursor->base.state->crtc_w,
3089 cursor->base.state->crtc_h,
3090 cursor->cursor.base);
3abc4e09
RF
3091 intel_scaler_info(m, crtc);
3092 intel_plane_info(m, crtc);
a23dc658 3093 }
cace841c
SV
3094
3095 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3096 yesno(!crtc->cpu_fifo_underrun_disabled),
3097 yesno(!crtc->pch_fifo_underrun_disabled));
3f6a5e1e 3098 drm_modeset_unlock(&crtc->base.mutex);
53f5e3ca
JB
3099 }
3100
3101 seq_printf(m, "\n");
3102 seq_printf(m, "Connector info\n");
3103 seq_printf(m, "--------------\n");
3f6a5e1e
SV
3104 mutex_lock(&dev->mode_config.mutex);
3105 drm_connector_list_iter_begin(dev, &conn_iter);
3106 drm_for_each_connector_iter(connector, &conn_iter)
53f5e3ca 3107 intel_connector_info(m, connector);
3f6a5e1e
SV
3108 drm_connector_list_iter_end(&conn_iter);
3109 mutex_unlock(&dev->mode_config.mutex);
3110
a037121c 3111 intel_runtime_pm_put(dev_priv, wakeref);
53f5e3ca
JB
3112
3113 return 0;
3114}
3115
1b36595f
CW
3116static int i915_engine_info(struct seq_file *m, void *unused)
3117{
3118 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3119 struct intel_engine_cs *engine;
a037121c 3120 intel_wakeref_t wakeref;
3b3f1650 3121 enum intel_engine_id id;
f636edb2 3122 struct drm_printer p;
1b36595f 3123
a037121c 3124 wakeref = intel_runtime_pm_get(dev_priv);
9c870d03 3125
6f56103d
CW
3126 seq_printf(m, "GT awake? %s (epoch %u)\n",
3127 yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
f73b5674
CW
3128 seq_printf(m, "Global active requests: %d\n",
3129 dev_priv->gt.active_requests);
f577a03b 3130 seq_printf(m, "CS timestamp frequency: %u kHz\n",
0258404f 3131 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
f73b5674 3132
f636edb2
CW
3133 p = drm_seq_file_printer(m);
3134 for_each_engine(engine, dev_priv, id)
0db18b17 3135 intel_engine_dump(engine, &p, "%s\n", engine->name);
1b36595f 3136
a037121c 3137 intel_runtime_pm_put(dev_priv, wakeref);
9c870d03 3138
1b36595f
CW
3139 return 0;
3140}
3141
79e9cd5f
LL
3142static int i915_rcs_topology(struct seq_file *m, void *unused)
3143{
3144 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3145 struct drm_printer p = drm_seq_file_printer(m);
3146
0258404f 3147 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
79e9cd5f
LL
3148
3149 return 0;
3150}
3151
c5418a8b
CW
3152static int i915_shrinker_info(struct seq_file *m, void *unused)
3153{
3154 struct drm_i915_private *i915 = node_to_i915(m->private);
3155
3156 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3157 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3158
3159 return 0;
3160}
3161
728e29d7
SV
3162static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3163{
36cdd013
DW
3164 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3165 struct drm_device *dev = &dev_priv->drm;
728e29d7
SV
3166 int i;
3167
3168 drm_modeset_lock_all(dev);
3169 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3170 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3171
72f775fa 3172 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
0823eb9c 3173 pll->info->id);
2dd66ebd 3174 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2c42e535 3175 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
728e29d7 3176 seq_printf(m, " tracked hardware state:\n");
2c42e535 3177 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
3e369b76 3178 seq_printf(m, " dpll_md: 0x%08x\n",
2c42e535
ACO
3179 pll->state.hw_state.dpll_md);
3180 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
3181 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
3182 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
c27e917e
PZ
3183 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
3184 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
3185 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
3186 pll->state.hw_state.mg_refclkin_ctl);
3187 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3188 pll->state.hw_state.mg_clktop2_coreclkctl1);
3189 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
3190 pll->state.hw_state.mg_clktop2_hsclkctl);
3191 seq_printf(m, " mg_pll_div0: 0x%08x\n",
3192 pll->state.hw_state.mg_pll_div0);
3193 seq_printf(m, " mg_pll_div1: 0x%08x\n",
3194 pll->state.hw_state.mg_pll_div1);
3195 seq_printf(m, " mg_pll_lf: 0x%08x\n",
3196 pll->state.hw_state.mg_pll_lf);
3197 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3198 pll->state.hw_state.mg_pll_frac_lock);
3199 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
3200 pll->state.hw_state.mg_pll_ssc);
3201 seq_printf(m, " mg_pll_bias: 0x%08x\n",
3202 pll->state.hw_state.mg_pll_bias);
3203 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3204 pll->state.hw_state.mg_pll_tdc_coldst_bias);
728e29d7
SV
3205 }
3206 drm_modeset_unlock_all(dev);
3207
3208 return 0;
3209}
3210
1ed1ef9d 3211static int i915_wa_registers(struct seq_file *m, void *unused)
888b5995 3212{
452420d2
TU
3213 struct drm_i915_private *i915 = node_to_i915(m->private);
3214 const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3215 struct i915_wa *wa;
3216 unsigned int i;
888b5995 3217
452420d2
TU
3218 seq_printf(m, "Workarounds applied: %u\n", wal->count);
3219 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
548764bb 3220 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
452420d2 3221 i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
888b5995
AS
3222
3223 return 0;
3224}
3225
d2d4f39b
KM
3226static int i915_ipc_status_show(struct seq_file *m, void *data)
3227{
3228 struct drm_i915_private *dev_priv = m->private;
3229
3230 seq_printf(m, "Isochronous Priority Control: %s\n",
3231 yesno(dev_priv->ipc_enabled));
3232 return 0;
3233}
3234
3235static int i915_ipc_status_open(struct inode *inode, struct file *file)
3236{
3237 struct drm_i915_private *dev_priv = inode->i_private;
3238
3239 if (!HAS_IPC(dev_priv))
3240 return -ENODEV;
3241
3242 return single_open(file, i915_ipc_status_show, dev_priv);
3243}
3244
3245static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3246 size_t len, loff_t *offp)
3247{
3248 struct seq_file *m = file->private_data;
3249 struct drm_i915_private *dev_priv = m->private;
a037121c 3250 intel_wakeref_t wakeref;
d2d4f39b 3251 bool enable;
d4225a53 3252 int ret;
d2d4f39b
KM
3253
3254 ret = kstrtobool_from_user(ubuf, len, &enable);
3255 if (ret < 0)
3256 return ret;
3257
d4225a53
CW
3258 with_intel_runtime_pm(dev_priv, wakeref) {
3259 if (!dev_priv->ipc_enabled && enable)
3260 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3261 dev_priv->wm.distrust_bios_wm = true;
3262 dev_priv->ipc_enabled = enable;
3263 intel_enable_ipc(dev_priv);
3264 }
d2d4f39b
KM
3265
3266 return len;
3267}
3268
3269static const struct file_operations i915_ipc_status_fops = {
3270 .owner = THIS_MODULE,
3271 .open = i915_ipc_status_open,
3272 .read = seq_read,
3273 .llseek = seq_lseek,
3274 .release = single_release,
3275 .write = i915_ipc_status_write
3276};
3277
c5511e44
DL
3278static int i915_ddb_info(struct seq_file *m, void *unused)
3279{
36cdd013
DW
3280 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3281 struct drm_device *dev = &dev_priv->drm;
c5511e44 3282 struct skl_ddb_entry *entry;
ff43bc37 3283 struct intel_crtc *crtc;
c5511e44 3284
36cdd013 3285 if (INTEL_GEN(dev_priv) < 9)
ab309a6a 3286 return -ENODEV;
2fcffe19 3287
c5511e44
DL
3288 drm_modeset_lock_all(dev);
3289
c5511e44
DL
3290 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3291
ff43bc37
VS
3292 for_each_intel_crtc(&dev_priv->drm, crtc) {
3293 struct intel_crtc_state *crtc_state =
3294 to_intel_crtc_state(crtc->base.state);
3295 enum pipe pipe = crtc->pipe;
3296 enum plane_id plane_id;
3297
c5511e44
DL
3298 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3299
ff43bc37
VS
3300 for_each_plane_id_on_crtc(crtc, plane_id) {
3301 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3302 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
c5511e44
DL
3303 entry->start, entry->end,
3304 skl_ddb_entry_size(entry));
3305 }
3306
ff43bc37 3307 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
c5511e44
DL
3308 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
3309 entry->end, skl_ddb_entry_size(entry));
3310 }
3311
3312 drm_modeset_unlock_all(dev);
3313
3314 return 0;
3315}
3316
a54746e3 3317static void drrs_status_per_crtc(struct seq_file *m,
36cdd013
DW
3318 struct drm_device *dev,
3319 struct intel_crtc *intel_crtc)
a54746e3 3320{
fac5e23e 3321 struct drm_i915_private *dev_priv = to_i915(dev);
a54746e3
VK
3322 struct i915_drrs *drrs = &dev_priv->drrs;
3323 int vrefresh = 0;
26875fe5 3324 struct drm_connector *connector;
3f6a5e1e 3325 struct drm_connector_list_iter conn_iter;
a54746e3 3326
3f6a5e1e
SV
3327 drm_connector_list_iter_begin(dev, &conn_iter);
3328 drm_for_each_connector_iter(connector, &conn_iter) {
26875fe5
ML
3329 if (connector->state->crtc != &intel_crtc->base)
3330 continue;
3331
3332 seq_printf(m, "%s:\n", connector->name);
a54746e3 3333 }
3f6a5e1e 3334 drm_connector_list_iter_end(&conn_iter);
a54746e3
VK
3335
3336 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3337 seq_puts(m, "\tVBT: DRRS_type: Static");
3338 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3339 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3340 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3341 seq_puts(m, "\tVBT: DRRS_type: None");
3342 else
3343 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3344
3345 seq_puts(m, "\n\n");
3346
f77076c9 3347 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
a54746e3
VK
3348 struct intel_panel *panel;
3349
3350 mutex_lock(&drrs->mutex);
3351 /* DRRS Supported */
3352 seq_puts(m, "\tDRRS Supported: Yes\n");
3353
3354 /* disable_drrs() will make drrs->dp NULL */
3355 if (!drrs->dp) {
ce6e2137
R
3356 seq_puts(m, "Idleness DRRS: Disabled\n");
3357 if (dev_priv->psr.enabled)
3358 seq_puts(m,
3359 "\tAs PSR is enabled, DRRS is not enabled\n");
a54746e3
VK
3360 mutex_unlock(&drrs->mutex);
3361 return;
3362 }
3363
3364 panel = &drrs->dp->attached_connector->panel;
3365 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3366 drrs->busy_frontbuffer_bits);
3367
3368 seq_puts(m, "\n\t\t");
3369 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3370 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3371 vrefresh = panel->fixed_mode->vrefresh;
3372 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3373 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3374 vrefresh = panel->downclock_mode->vrefresh;
3375 } else {
3376 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3377 drrs->refresh_rate_type);
3378 mutex_unlock(&drrs->mutex);
3379 return;
3380 }
3381 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3382
3383 seq_puts(m, "\n\t\t");
3384 mutex_unlock(&drrs->mutex);
3385 } else {
3386 /* DRRS not supported. Print the VBT parameter*/
3387 seq_puts(m, "\tDRRS Supported : No");
3388 }
3389 seq_puts(m, "\n");
3390}
3391
3392static int i915_drrs_status(struct seq_file *m, void *unused)
3393{
36cdd013
DW
3394 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3395 struct drm_device *dev = &dev_priv->drm;
a54746e3
VK
3396 struct intel_crtc *intel_crtc;
3397 int active_crtc_cnt = 0;
3398
26875fe5 3399 drm_modeset_lock_all(dev);
a54746e3 3400 for_each_intel_crtc(dev, intel_crtc) {
f77076c9 3401 if (intel_crtc->base.state->active) {
a54746e3
VK
3402 active_crtc_cnt++;
3403 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3404
3405 drrs_status_per_crtc(m, dev, intel_crtc);
3406 }
a54746e3 3407 }
26875fe5 3408 drm_modeset_unlock_all(dev);
a54746e3
VK
3409
3410 if (!active_crtc_cnt)
3411 seq_puts(m, "No active crtc found\n");
3412
3413 return 0;
3414}
3415
11bed958
DA
3416static int i915_dp_mst_info(struct seq_file *m, void *unused)
3417{
36cdd013
DW
3418 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3419 struct drm_device *dev = &dev_priv->drm;
11bed958
DA
3420 struct intel_encoder *intel_encoder;
3421 struct intel_digital_port *intel_dig_port;
b6dabe3b 3422 struct drm_connector *connector;
3f6a5e1e 3423 struct drm_connector_list_iter conn_iter;
b6dabe3b 3424
3f6a5e1e
SV
3425 drm_connector_list_iter_begin(dev, &conn_iter);
3426 drm_for_each_connector_iter(connector, &conn_iter) {
b6dabe3b 3427 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
11bed958 3428 continue;
b6dabe3b
ML
3429
3430 intel_encoder = intel_attached_encoder(connector);
3431 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3432 continue;
3433
3434 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
11bed958
DA
3435 if (!intel_dig_port->dp.can_mst)
3436 continue;
b6dabe3b 3437
40ae80cc 3438 seq_printf(m, "MST Source Port %c\n",
8f4f2797 3439 port_name(intel_dig_port->base.port));
11bed958
DA
3440 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3441 }
3f6a5e1e
SV
3442 drm_connector_list_iter_end(&conn_iter);
3443
11bed958
DA
3444 return 0;
3445}
3446
eb3394fa 3447static ssize_t i915_displayport_test_active_write(struct file *file,
36cdd013
DW
3448 const char __user *ubuf,
3449 size_t len, loff_t *offp)
eb3394fa
TP
3450{
3451 char *input_buffer;
3452 int status = 0;
eb3394fa
TP
3453 struct drm_device *dev;
3454 struct drm_connector *connector;
3f6a5e1e 3455 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3456 struct intel_dp *intel_dp;
3457 int val = 0;
3458
9aaffa34 3459 dev = ((struct seq_file *)file->private_data)->private;
eb3394fa 3460
eb3394fa
TP
3461 if (len == 0)
3462 return 0;
3463
261aeba8
GT
3464 input_buffer = memdup_user_nul(ubuf, len);
3465 if (IS_ERR(input_buffer))
3466 return PTR_ERR(input_buffer);
eb3394fa 3467
eb3394fa
TP
3468 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3469
3f6a5e1e
SV
3470 drm_connector_list_iter_begin(dev, &conn_iter);
3471 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3472 struct intel_encoder *encoder;
3473
eb3394fa
TP
3474 if (connector->connector_type !=
3475 DRM_MODE_CONNECTOR_DisplayPort)
3476 continue;
3477
a874b6a3
ML
3478 encoder = to_intel_encoder(connector->encoder);
3479 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3480 continue;
3481
3482 if (encoder && connector->status == connector_status_connected) {
3483 intel_dp = enc_to_intel_dp(&encoder->base);
eb3394fa
TP
3484 status = kstrtoint(input_buffer, 10, &val);
3485 if (status < 0)
3f6a5e1e 3486 break;
eb3394fa
TP
3487 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3488 /* To prevent erroneous activation of the compliance
3489 * testing code, only accept an actual value of 1 here
3490 */
3491 if (val == 1)
c1617abc 3492 intel_dp->compliance.test_active = 1;
eb3394fa 3493 else
c1617abc 3494 intel_dp->compliance.test_active = 0;
eb3394fa
TP
3495 }
3496 }
3f6a5e1e 3497 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3498 kfree(input_buffer);
3499 if (status < 0)
3500 return status;
3501
3502 *offp += len;
3503 return len;
3504}
3505
3506static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3507{
e4006713
AS
3508 struct drm_i915_private *dev_priv = m->private;
3509 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3510 struct drm_connector *connector;
3f6a5e1e 3511 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3512 struct intel_dp *intel_dp;
3513
3f6a5e1e
SV
3514 drm_connector_list_iter_begin(dev, &conn_iter);
3515 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3516 struct intel_encoder *encoder;
3517
eb3394fa
TP
3518 if (connector->connector_type !=
3519 DRM_MODE_CONNECTOR_DisplayPort)
3520 continue;
3521
a874b6a3
ML
3522 encoder = to_intel_encoder(connector->encoder);
3523 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3524 continue;
3525
3526 if (encoder && connector->status == connector_status_connected) {
3527 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3528 if (intel_dp->compliance.test_active)
eb3394fa
TP
3529 seq_puts(m, "1");
3530 else
3531 seq_puts(m, "0");
3532 } else
3533 seq_puts(m, "0");
3534 }
3f6a5e1e 3535 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3536
3537 return 0;
3538}
3539
3540static int i915_displayport_test_active_open(struct inode *inode,
36cdd013 3541 struct file *file)
eb3394fa 3542{
36cdd013 3543 return single_open(file, i915_displayport_test_active_show,
e4006713 3544 inode->i_private);
eb3394fa
TP
3545}
3546
3547static const struct file_operations i915_displayport_test_active_fops = {
3548 .owner = THIS_MODULE,
3549 .open = i915_displayport_test_active_open,
3550 .read = seq_read,
3551 .llseek = seq_lseek,
3552 .release = single_release,
3553 .write = i915_displayport_test_active_write
3554};
3555
3556static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3557{
e4006713
AS
3558 struct drm_i915_private *dev_priv = m->private;
3559 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3560 struct drm_connector *connector;
3f6a5e1e 3561 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3562 struct intel_dp *intel_dp;
3563
3f6a5e1e
SV
3564 drm_connector_list_iter_begin(dev, &conn_iter);
3565 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3566 struct intel_encoder *encoder;
3567
eb3394fa
TP
3568 if (connector->connector_type !=
3569 DRM_MODE_CONNECTOR_DisplayPort)
3570 continue;
3571
a874b6a3
ML
3572 encoder = to_intel_encoder(connector->encoder);
3573 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3574 continue;
3575
3576 if (encoder && connector->status == connector_status_connected) {
3577 intel_dp = enc_to_intel_dp(&encoder->base);
b48a5ba9
MN
3578 if (intel_dp->compliance.test_type ==
3579 DP_TEST_LINK_EDID_READ)
3580 seq_printf(m, "%lx",
3581 intel_dp->compliance.test_data.edid);
611032bf
MN
3582 else if (intel_dp->compliance.test_type ==
3583 DP_TEST_LINK_VIDEO_PATTERN) {
3584 seq_printf(m, "hdisplay: %d\n",
3585 intel_dp->compliance.test_data.hdisplay);
3586 seq_printf(m, "vdisplay: %d\n",
3587 intel_dp->compliance.test_data.vdisplay);
3588 seq_printf(m, "bpc: %u\n",
3589 intel_dp->compliance.test_data.bpc);
3590 }
eb3394fa
TP
3591 } else
3592 seq_puts(m, "0");
3593 }
3f6a5e1e 3594 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3595
3596 return 0;
3597}
e4006713 3598DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
eb3394fa
TP
3599
3600static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3601{
e4006713
AS
3602 struct drm_i915_private *dev_priv = m->private;
3603 struct drm_device *dev = &dev_priv->drm;
eb3394fa 3604 struct drm_connector *connector;
3f6a5e1e 3605 struct drm_connector_list_iter conn_iter;
eb3394fa
TP
3606 struct intel_dp *intel_dp;
3607
3f6a5e1e
SV
3608 drm_connector_list_iter_begin(dev, &conn_iter);
3609 drm_for_each_connector_iter(connector, &conn_iter) {
a874b6a3
ML
3610 struct intel_encoder *encoder;
3611
eb3394fa
TP
3612 if (connector->connector_type !=
3613 DRM_MODE_CONNECTOR_DisplayPort)
3614 continue;
3615
a874b6a3
ML
3616 encoder = to_intel_encoder(connector->encoder);
3617 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3618 continue;
3619
3620 if (encoder && connector->status == connector_status_connected) {
3621 intel_dp = enc_to_intel_dp(&encoder->base);
c1617abc 3622 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
eb3394fa
TP
3623 } else
3624 seq_puts(m, "0");
3625 }
3f6a5e1e 3626 drm_connector_list_iter_end(&conn_iter);
eb3394fa
TP
3627
3628 return 0;
3629}
e4006713 3630DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
eb3394fa 3631
e5315213 3632static void wm_latency_show(struct seq_file *m, const u16 wm[8])
369a1342 3633{
36cdd013
DW
3634 struct drm_i915_private *dev_priv = m->private;
3635 struct drm_device *dev = &dev_priv->drm;
369a1342 3636 int level;
de38b95c
VS
3637 int num_levels;
3638
36cdd013 3639 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3640 num_levels = 3;
36cdd013 3641 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3642 num_levels = 1;
04548cba
VS
3643 else if (IS_G4X(dev_priv))
3644 num_levels = 3;
de38b95c 3645 else
5db94019 3646 num_levels = ilk_wm_max_level(dev_priv) + 1;
369a1342
VS
3647
3648 drm_modeset_lock_all(dev);
3649
3650 for (level = 0; level < num_levels; level++) {
3651 unsigned int latency = wm[level];
3652
97e94b22
DL
3653 /*
3654 * - WM1+ latency values in 0.5us units
de38b95c 3655 * - latencies are in us on gen9/vlv/chv
97e94b22 3656 */
04548cba
VS
3657 if (INTEL_GEN(dev_priv) >= 9 ||
3658 IS_VALLEYVIEW(dev_priv) ||
3659 IS_CHERRYVIEW(dev_priv) ||
3660 IS_G4X(dev_priv))
97e94b22
DL
3661 latency *= 10;
3662 else if (level > 0)
369a1342
VS
3663 latency *= 5;
3664
3665 seq_printf(m, "WM%d %u (%u.%u usec)\n",
97e94b22 3666 level, wm[level], latency / 10, latency % 10);
369a1342
VS
3667 }
3668
3669 drm_modeset_unlock_all(dev);
3670}
3671
3672static int pri_wm_latency_show(struct seq_file *m, void *data)
3673{
36cdd013 3674 struct drm_i915_private *dev_priv = m->private;
e5315213 3675 const u16 *latencies;
97e94b22 3676
36cdd013 3677 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3678 latencies = dev_priv->wm.skl_latency;
3679 else
36cdd013 3680 latencies = dev_priv->wm.pri_latency;
369a1342 3681
97e94b22 3682 wm_latency_show(m, latencies);
369a1342
VS
3683
3684 return 0;
3685}
3686
3687static int spr_wm_latency_show(struct seq_file *m, void *data)
3688{
36cdd013 3689 struct drm_i915_private *dev_priv = m->private;
e5315213 3690 const u16 *latencies;
97e94b22 3691
36cdd013 3692 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3693 latencies = dev_priv->wm.skl_latency;
3694 else
36cdd013 3695 latencies = dev_priv->wm.spr_latency;
369a1342 3696
97e94b22 3697 wm_latency_show(m, latencies);
369a1342
VS
3698
3699 return 0;
3700}
3701
3702static int cur_wm_latency_show(struct seq_file *m, void *data)
3703{
36cdd013 3704 struct drm_i915_private *dev_priv = m->private;
e5315213 3705 const u16 *latencies;
97e94b22 3706
36cdd013 3707 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3708 latencies = dev_priv->wm.skl_latency;
3709 else
36cdd013 3710 latencies = dev_priv->wm.cur_latency;
369a1342 3711
97e94b22 3712 wm_latency_show(m, latencies);
369a1342
VS
3713
3714 return 0;
3715}
3716
3717static int pri_wm_latency_open(struct inode *inode, struct file *file)
3718{
36cdd013 3719 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3720
04548cba 3721 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
369a1342
VS
3722 return -ENODEV;
3723
36cdd013 3724 return single_open(file, pri_wm_latency_show, dev_priv);
369a1342
VS
3725}
3726
3727static int spr_wm_latency_open(struct inode *inode, struct file *file)
3728{
36cdd013 3729 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3730
b2ae318a 3731 if (HAS_GMCH(dev_priv))
369a1342
VS
3732 return -ENODEV;
3733
36cdd013 3734 return single_open(file, spr_wm_latency_show, dev_priv);
369a1342
VS
3735}
3736
3737static int cur_wm_latency_open(struct inode *inode, struct file *file)
3738{
36cdd013 3739 struct drm_i915_private *dev_priv = inode->i_private;
369a1342 3740
b2ae318a 3741 if (HAS_GMCH(dev_priv))
369a1342
VS
3742 return -ENODEV;
3743
36cdd013 3744 return single_open(file, cur_wm_latency_show, dev_priv);
369a1342
VS
3745}
3746
3747static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
e5315213 3748 size_t len, loff_t *offp, u16 wm[8])
369a1342
VS
3749{
3750 struct seq_file *m = file->private_data;
36cdd013
DW
3751 struct drm_i915_private *dev_priv = m->private;
3752 struct drm_device *dev = &dev_priv->drm;
e5315213 3753 u16 new[8] = { 0 };
de38b95c 3754 int num_levels;
369a1342
VS
3755 int level;
3756 int ret;
3757 char tmp[32];
3758
36cdd013 3759 if (IS_CHERRYVIEW(dev_priv))
de38b95c 3760 num_levels = 3;
36cdd013 3761 else if (IS_VALLEYVIEW(dev_priv))
de38b95c 3762 num_levels = 1;
04548cba
VS
3763 else if (IS_G4X(dev_priv))
3764 num_levels = 3;
de38b95c 3765 else
5db94019 3766 num_levels = ilk_wm_max_level(dev_priv) + 1;
de38b95c 3767
369a1342
VS
3768 if (len >= sizeof(tmp))
3769 return -EINVAL;
3770
3771 if (copy_from_user(tmp, ubuf, len))
3772 return -EFAULT;
3773
3774 tmp[len] = '\0';
3775
97e94b22
DL
3776 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3777 &new[0], &new[1], &new[2], &new[3],
3778 &new[4], &new[5], &new[6], &new[7]);
369a1342
VS
3779 if (ret != num_levels)
3780 return -EINVAL;
3781
3782 drm_modeset_lock_all(dev);
3783
3784 for (level = 0; level < num_levels; level++)
3785 wm[level] = new[level];
3786
3787 drm_modeset_unlock_all(dev);
3788
3789 return len;
3790}
3791
3792
3793static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3794 size_t len, loff_t *offp)
3795{
3796 struct seq_file *m = file->private_data;
36cdd013 3797 struct drm_i915_private *dev_priv = m->private;
e5315213 3798 u16 *latencies;
369a1342 3799
36cdd013 3800 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3801 latencies = dev_priv->wm.skl_latency;
3802 else
36cdd013 3803 latencies = dev_priv->wm.pri_latency;
97e94b22
DL
3804
3805 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3806}
3807
3808static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3809 size_t len, loff_t *offp)
3810{
3811 struct seq_file *m = file->private_data;
36cdd013 3812 struct drm_i915_private *dev_priv = m->private;
e5315213 3813 u16 *latencies;
369a1342 3814
36cdd013 3815 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3816 latencies = dev_priv->wm.skl_latency;
3817 else
36cdd013 3818 latencies = dev_priv->wm.spr_latency;
97e94b22
DL
3819
3820 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3821}
3822
3823static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3824 size_t len, loff_t *offp)
3825{
3826 struct seq_file *m = file->private_data;
36cdd013 3827 struct drm_i915_private *dev_priv = m->private;
e5315213 3828 u16 *latencies;
97e94b22 3829
36cdd013 3830 if (INTEL_GEN(dev_priv) >= 9)
97e94b22
DL
3831 latencies = dev_priv->wm.skl_latency;
3832 else
36cdd013 3833 latencies = dev_priv->wm.cur_latency;
369a1342 3834
97e94b22 3835 return wm_latency_write(file, ubuf, len, offp, latencies);
369a1342
VS
3836}
3837
3838static const struct file_operations i915_pri_wm_latency_fops = {
3839 .owner = THIS_MODULE,
3840 .open = pri_wm_latency_open,
3841 .read = seq_read,
3842 .llseek = seq_lseek,
3843 .release = single_release,
3844 .write = pri_wm_latency_write
3845};
3846
3847static const struct file_operations i915_spr_wm_latency_fops = {
3848 .owner = THIS_MODULE,
3849 .open = spr_wm_latency_open,
3850 .read = seq_read,
3851 .llseek = seq_lseek,
3852 .release = single_release,
3853 .write = spr_wm_latency_write
3854};
3855
3856static const struct file_operations i915_cur_wm_latency_fops = {
3857 .owner = THIS_MODULE,
3858 .open = cur_wm_latency_open,
3859 .read = seq_read,
3860 .llseek = seq_lseek,
3861 .release = single_release,
3862 .write = cur_wm_latency_write
3863};
3864
647416f9
KC
3865static int
3866i915_wedged_get(void *data, u64 *val)
f3cd474b 3867{
36cdd013 3868 struct drm_i915_private *dev_priv = data;
f3cd474b 3869
d98c52cf 3870 *val = i915_terminally_wedged(&dev_priv->gpu_error);
f3cd474b 3871
647416f9 3872 return 0;
f3cd474b
CW
3873}
3874
647416f9
KC
3875static int
3876i915_wedged_set(void *data, u64 val)
f3cd474b 3877{
598b6b5a 3878 struct drm_i915_private *i915 = data;
d46c0517 3879
b8d24a06
MK
3880 /*
3881 * There is no safeguard against this debugfs entry colliding
3882 * with the hangcheck calling same i915_handle_error() in
3883 * parallel, causing an explosion. For now we assume that the
3884 * test harness is responsible enough not to inject gpu hangs
3885 * while it is writing to 'i915_wedged'
3886 */
3887
598b6b5a 3888 if (i915_reset_backoff(&i915->gpu_error))
b8d24a06
MK
3889 return -EAGAIN;
3890
ce800754
CW
3891 i915_handle_error(i915, val, I915_ERROR_CAPTURE,
3892 "Manually set wedged engine mask = %llx", val);
647416f9 3893 return 0;
f3cd474b
CW
3894}
3895
647416f9
KC
3896DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3897 i915_wedged_get, i915_wedged_set,
3a3b4f98 3898 "%llu\n");
f3cd474b 3899
b4a0b32d
CW
3900#define DROP_UNBOUND BIT(0)
3901#define DROP_BOUND BIT(1)
3902#define DROP_RETIRE BIT(2)
3903#define DROP_ACTIVE BIT(3)
3904#define DROP_FREED BIT(4)
3905#define DROP_SHRINK_ALL BIT(5)
3906#define DROP_IDLE BIT(6)
6b048706
CW
3907#define DROP_RESET_ACTIVE BIT(7)
3908#define DROP_RESET_SEQNO BIT(8)
fbbd37b3
CW
3909#define DROP_ALL (DROP_UNBOUND | \
3910 DROP_BOUND | \
3911 DROP_RETIRE | \
3912 DROP_ACTIVE | \
8eadc19b 3913 DROP_FREED | \
b4a0b32d 3914 DROP_SHRINK_ALL |\
6b048706
CW
3915 DROP_IDLE | \
3916 DROP_RESET_ACTIVE | \
3917 DROP_RESET_SEQNO)
647416f9
KC
3918static int
3919i915_drop_caches_get(void *data, u64 *val)
dd624afd 3920{
647416f9 3921 *val = DROP_ALL;
dd624afd 3922
647416f9 3923 return 0;
dd624afd
CW
3924}
3925
647416f9
KC
3926static int
3927i915_drop_caches_set(void *data, u64 val)
dd624afd 3928{
6b048706 3929 struct drm_i915_private *i915 = data;
a037121c 3930 intel_wakeref_t wakeref;
00c26cf9 3931 int ret = 0;
dd624afd 3932
b4a0b32d
CW
3933 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3934 val, val & DROP_ALL);
a037121c 3935 wakeref = intel_runtime_pm_get(i915);
dd624afd 3936
ad4062da
CW
3937 if (val & DROP_RESET_ACTIVE &&
3938 wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
6b048706
CW
3939 i915_gem_set_wedged(i915);
3940
dd624afd
CW
3941 /* No need to check and wait for gpu resets, only libdrm auto-restarts
3942 * on ioctls on -EAGAIN. */
6b048706
CW
3943 if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3944 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
dd624afd 3945 if (ret)
198a2a2f 3946 goto out;
dd624afd 3947
00c26cf9 3948 if (val & DROP_ACTIVE)
6b048706 3949 ret = i915_gem_wait_for_idle(i915,
00c26cf9 3950 I915_WAIT_INTERRUPTIBLE |
ec625fb9
CW
3951 I915_WAIT_LOCKED,
3952 MAX_SCHEDULE_TIMEOUT);
00c26cf9
CW
3953
3954 if (val & DROP_RETIRE)
6b048706 3955 i915_retire_requests(i915);
00c26cf9 3956
6b048706
CW
3957 mutex_unlock(&i915->drm.struct_mutex);
3958 }
3959
eb8d0f5a 3960 if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
6b048706 3961 i915_handle_error(i915, ALL_ENGINES, 0, NULL);
dd624afd 3962
d92a8cfc 3963 fs_reclaim_acquire(GFP_KERNEL);
21ab4e74 3964 if (val & DROP_BOUND)
6b048706 3965 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4ad72b7f 3966
21ab4e74 3967 if (val & DROP_UNBOUND)
6b048706 3968 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
dd624afd 3969
8eadc19b 3970 if (val & DROP_SHRINK_ALL)
6b048706 3971 i915_gem_shrink_all(i915);
d92a8cfc 3972 fs_reclaim_release(GFP_KERNEL);
8eadc19b 3973
4dfacb0b
CW
3974 if (val & DROP_IDLE) {
3975 do {
6b048706
CW
3976 if (READ_ONCE(i915->gt.active_requests))
3977 flush_delayed_work(&i915->gt.retire_work);
3978 drain_delayed_work(&i915->gt.idle_work);
3979 } while (READ_ONCE(i915->gt.awake));
4dfacb0b 3980 }
b4a0b32d 3981
c9c70471 3982 if (val & DROP_FREED)
6b048706 3983 i915_gem_drain_freed_objects(i915);
fbbd37b3 3984
198a2a2f 3985out:
a037121c 3986 intel_runtime_pm_put(i915, wakeref);
9d3eb2c3 3987
647416f9 3988 return ret;
dd624afd
CW
3989}
3990
647416f9
KC
3991DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3992 i915_drop_caches_get, i915_drop_caches_set,
3993 "0x%08llx\n");
dd624afd 3994
647416f9
KC
3995static int
3996i915_cache_sharing_get(void *data, u64 *val)
07b7ddd9 3997{
36cdd013 3998 struct drm_i915_private *dev_priv = data;
a037121c 3999 intel_wakeref_t wakeref;
d4225a53 4000 u32 snpcr = 0;
07b7ddd9 4001
f3ce44a0 4002 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
004777cb
SV
4003 return -ENODEV;
4004
d4225a53
CW
4005 with_intel_runtime_pm(dev_priv, wakeref)
4006 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
07b7ddd9 4007
647416f9 4008 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
07b7ddd9 4009
647416f9 4010 return 0;
07b7ddd9
JB
4011}
4012
647416f9
KC
4013static int
4014i915_cache_sharing_set(void *data, u64 val)
07b7ddd9 4015{
36cdd013 4016 struct drm_i915_private *dev_priv = data;
a037121c 4017 intel_wakeref_t wakeref;
07b7ddd9 4018
f3ce44a0 4019 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
004777cb
SV
4020 return -ENODEV;
4021
647416f9 4022 if (val > 3)
07b7ddd9
JB
4023 return -EINVAL;
4024
647416f9 4025 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
d4225a53
CW
4026 with_intel_runtime_pm(dev_priv, wakeref) {
4027 u32 snpcr;
4028
4029 /* Update the cache sharing policy here as well */
4030 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4031 snpcr &= ~GEN6_MBC_SNPCR_MASK;
4032 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
4033 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4034 }
07b7ddd9 4035
647416f9 4036 return 0;
07b7ddd9
JB
4037}
4038
647416f9
KC
4039DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4040 i915_cache_sharing_get, i915_cache_sharing_set,
4041 "%llu\n");
07b7ddd9 4042
36cdd013 4043static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4044 struct sseu_dev_info *sseu)
5d39525a 4045{
7aa0b14e
CW
4046#define SS_MAX 2
4047 const int ss_max = SS_MAX;
4048 u32 sig1[SS_MAX], sig2[SS_MAX];
5d39525a 4049 int ss;
5d39525a
JM
4050
4051 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4052 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4053 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4054 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4055
4056 for (ss = 0; ss < ss_max; ss++) {
4057 unsigned int eu_cnt;
4058
4059 if (sig1[ss] & CHV_SS_PG_ENABLE)
4060 /* skip disabled subslice */
4061 continue;
4062
f08a0c92 4063 sseu->slice_mask = BIT(0);
8cc76693 4064 sseu->subslice_mask[0] |= BIT(ss);
5d39525a
JM
4065 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4066 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4067 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4068 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
915490d5
ID
4069 sseu->eu_total += eu_cnt;
4070 sseu->eu_per_subslice = max_t(unsigned int,
4071 sseu->eu_per_subslice, eu_cnt);
5d39525a 4072 }
7aa0b14e 4073#undef SS_MAX
5d39525a
JM
4074}
4075
f8c3dcf9
RV
4076static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4077 struct sseu_dev_info *sseu)
4078{
c7fb3c6c 4079#define SS_MAX 6
0258404f 4080 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
c7fb3c6c 4081 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
f8c3dcf9 4082 int s, ss;
f8c3dcf9 4083
b3e7f866 4084 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4085 /*
4086 * FIXME: Valid SS Mask respects the spec and read
3c64ea8c 4087 * only valid bits for those registers, excluding reserved
f8c3dcf9
RV
4088 * although this seems wrong because it would leave many
4089 * subslices without ACK.
4090 */
4091 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4092 GEN10_PGCTL_VALID_SS_MASK(s);
4093 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4094 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4095 }
4096
4097 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4098 GEN9_PGCTL_SSA_EU19_ACK |
4099 GEN9_PGCTL_SSA_EU210_ACK |
4100 GEN9_PGCTL_SSA_EU311_ACK;
4101 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4102 GEN9_PGCTL_SSB_EU19_ACK |
4103 GEN9_PGCTL_SSB_EU210_ACK |
4104 GEN9_PGCTL_SSB_EU311_ACK;
4105
b3e7f866 4106 for (s = 0; s < info->sseu.max_slices; s++) {
f8c3dcf9
RV
4107 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4108 /* skip disabled slice */
4109 continue;
4110
4111 sseu->slice_mask |= BIT(s);
8cc76693 4112 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
f8c3dcf9 4113
b3e7f866 4114 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
f8c3dcf9
RV
4115 unsigned int eu_cnt;
4116
4117 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4118 /* skip disabled subslice */
4119 continue;
4120
4121 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4122 eu_mask[ss % 2]);
4123 sseu->eu_total += eu_cnt;
4124 sseu->eu_per_subslice = max_t(unsigned int,
4125 sseu->eu_per_subslice,
4126 eu_cnt);
4127 }
4128 }
c7fb3c6c 4129#undef SS_MAX
f8c3dcf9
RV
4130}
4131
36cdd013 4132static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4133 struct sseu_dev_info *sseu)
5d39525a 4134{
c7fb3c6c 4135#define SS_MAX 3
0258404f 4136 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
c7fb3c6c 4137 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
5d39525a 4138 int s, ss;
1c046bc1 4139
b3e7f866 4140 for (s = 0; s < info->sseu.max_slices; s++) {
1c046bc1
JM
4141 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4142 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4143 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4144 }
4145
5d39525a
JM
4146 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4147 GEN9_PGCTL_SSA_EU19_ACK |
4148 GEN9_PGCTL_SSA_EU210_ACK |
4149 GEN9_PGCTL_SSA_EU311_ACK;
4150 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4151 GEN9_PGCTL_SSB_EU19_ACK |
4152 GEN9_PGCTL_SSB_EU210_ACK |
4153 GEN9_PGCTL_SSB_EU311_ACK;
4154
b3e7f866 4155 for (s = 0; s < info->sseu.max_slices; s++) {
5d39525a
JM
4156 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4157 /* skip disabled slice */
4158 continue;
4159
f08a0c92 4160 sseu->slice_mask |= BIT(s);
1c046bc1 4161
f8c3dcf9 4162 if (IS_GEN9_BC(dev_priv))
8cc76693 4163 sseu->subslice_mask[s] =
0258404f 4164 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
1c046bc1 4165
b3e7f866 4166 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
5d39525a
JM
4167 unsigned int eu_cnt;
4168
cc3f90f0 4169 if (IS_GEN9_LP(dev_priv)) {
57ec171e
ID
4170 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4171 /* skip disabled subslice */
4172 continue;
1c046bc1 4173
8cc76693 4174 sseu->subslice_mask[s] |= BIT(ss);
57ec171e 4175 }
1c046bc1 4176
5d39525a
JM
4177 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4178 eu_mask[ss%2]);
915490d5
ID
4179 sseu->eu_total += eu_cnt;
4180 sseu->eu_per_subslice = max_t(unsigned int,
4181 sseu->eu_per_subslice,
4182 eu_cnt);
5d39525a
JM
4183 }
4184 }
c7fb3c6c 4185#undef SS_MAX
5d39525a
JM
4186}
4187
36cdd013 4188static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
915490d5 4189 struct sseu_dev_info *sseu)
91bedd34 4190{
91bedd34 4191 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
36cdd013 4192 int s;
91bedd34 4193
f08a0c92 4194 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
91bedd34 4195
f08a0c92 4196 if (sseu->slice_mask) {
43b67998 4197 sseu->eu_per_subslice =
0258404f 4198 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
8cc76693
LL
4199 for (s = 0; s < fls(sseu->slice_mask); s++) {
4200 sseu->subslice_mask[s] =
0258404f 4201 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
8cc76693 4202 }
57ec171e
ID
4203 sseu->eu_total = sseu->eu_per_subslice *
4204 sseu_subslice_total(sseu);
91bedd34
ŁD
4205
4206 /* subtract fused off EU(s) from enabled slice(s) */
795b38b3 4207 for (s = 0; s < fls(sseu->slice_mask); s++) {
43b67998 4208 u8 subslice_7eu =
0258404f 4209 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
91bedd34 4210
915490d5 4211 sseu->eu_total -= hweight8(subslice_7eu);
91bedd34
ŁD
4212 }
4213 }
4214}
4215
615d8908
ID
4216static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4217 const struct sseu_dev_info *sseu)
4218{
4219 struct drm_i915_private *dev_priv = node_to_i915(m->private);
4220 const char *type = is_available_info ? "Available" : "Enabled";
8cc76693 4221 int s;
615d8908 4222
c67ba538
ID
4223 seq_printf(m, " %s Slice Mask: %04x\n", type,
4224 sseu->slice_mask);
615d8908 4225 seq_printf(m, " %s Slice Total: %u\n", type,
f08a0c92 4226 hweight8(sseu->slice_mask));
615d8908 4227 seq_printf(m, " %s Subslice Total: %u\n", type,
57ec171e 4228 sseu_subslice_total(sseu));
8cc76693
LL
4229 for (s = 0; s < fls(sseu->slice_mask); s++) {
4230 seq_printf(m, " %s Slice%i subslices: %u\n", type,
4231 s, hweight8(sseu->subslice_mask[s]));
4232 }
615d8908
ID
4233 seq_printf(m, " %s EU Total: %u\n", type,
4234 sseu->eu_total);
4235 seq_printf(m, " %s EU Per Subslice: %u\n", type,
4236 sseu->eu_per_subslice);
4237
4238 if (!is_available_info)
4239 return;
4240
4241 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4242 if (HAS_POOLED_EU(dev_priv))
4243 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
4244
4245 seq_printf(m, " Has Slice Power Gating: %s\n",
4246 yesno(sseu->has_slice_pg));
4247 seq_printf(m, " Has Subslice Power Gating: %s\n",
4248 yesno(sseu->has_subslice_pg));
4249 seq_printf(m, " Has EU Power Gating: %s\n",
4250 yesno(sseu->has_eu_pg));
4251}
4252
3873218f
JM
4253static int i915_sseu_status(struct seq_file *m, void *unused)
4254{
36cdd013 4255 struct drm_i915_private *dev_priv = node_to_i915(m->private);
915490d5 4256 struct sseu_dev_info sseu;
a037121c 4257 intel_wakeref_t wakeref;
3873218f 4258
36cdd013 4259 if (INTEL_GEN(dev_priv) < 8)
3873218f
JM
4260 return -ENODEV;
4261
4262 seq_puts(m, "SSEU Device Info\n");
0258404f 4263 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
3873218f 4264
7f992aba 4265 seq_puts(m, "SSEU Device Status\n");
915490d5 4266 memset(&sseu, 0, sizeof(sseu));
0258404f
JN
4267 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4268 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
8cc76693 4269 sseu.max_eus_per_subslice =
0258404f 4270 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
238010ed 4271
d4225a53
CW
4272 with_intel_runtime_pm(dev_priv, wakeref) {
4273 if (IS_CHERRYVIEW(dev_priv))
4274 cherryview_sseu_device_status(dev_priv, &sseu);
4275 else if (IS_BROADWELL(dev_priv))
4276 broadwell_sseu_device_status(dev_priv, &sseu);
4277 else if (IS_GEN(dev_priv, 9))
4278 gen9_sseu_device_status(dev_priv, &sseu);
4279 else if (INTEL_GEN(dev_priv) >= 10)
4280 gen10_sseu_device_status(dev_priv, &sseu);
7f992aba 4281 }
238010ed 4282
615d8908 4283 i915_print_sseu_info(m, false, &sseu);
7f992aba 4284
3873218f
JM
4285 return 0;
4286}
4287
6d794d42
BW
4288static int i915_forcewake_open(struct inode *inode, struct file *file)
4289{
d7a133d8 4290 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4291
d7a133d8 4292 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4293 return 0;
4294
6ddbb12e 4295 file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
d7a133d8 4296 intel_uncore_forcewake_user_get(i915);
6d794d42
BW
4297
4298 return 0;
4299}
4300
c43b5634 4301static int i915_forcewake_release(struct inode *inode, struct file *file)
6d794d42 4302{
d7a133d8 4303 struct drm_i915_private *i915 = inode->i_private;
6d794d42 4304
d7a133d8 4305 if (INTEL_GEN(i915) < 6)
6d794d42
BW
4306 return 0;
4307
d7a133d8 4308 intel_uncore_forcewake_user_put(i915);
6ddbb12e
TU
4309 intel_runtime_pm_put(i915,
4310 (intel_wakeref_t)(uintptr_t)file->private_data);
6d794d42
BW
4311
4312 return 0;
4313}
4314
4315static const struct file_operations i915_forcewake_fops = {
4316 .owner = THIS_MODULE,
4317 .open = i915_forcewake_open,
4318 .release = i915_forcewake_release,
4319};
4320
317eaa95
L
4321static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4322{
4323 struct drm_i915_private *dev_priv = m->private;
4324 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4325
6fc5d789
LP
4326 /* Synchronize with everything first in case there's been an HPD
4327 * storm, but we haven't finished handling it in the kernel yet
4328 */
4329 synchronize_irq(dev_priv->drm.irq);
4330 flush_work(&dev_priv->hotplug.dig_port_work);
4331 flush_work(&dev_priv->hotplug.hotplug_work);
4332
317eaa95
L
4333 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4334 seq_printf(m, "Detected: %s\n",
4335 yesno(delayed_work_pending(&hotplug->reenable_work)));
4336
4337 return 0;
4338}
4339
4340static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4341 const char __user *ubuf, size_t len,
4342 loff_t *offp)
4343{
4344 struct seq_file *m = file->private_data;
4345 struct drm_i915_private *dev_priv = m->private;
4346 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4347 unsigned int new_threshold;
4348 int i;
4349 char *newline;
4350 char tmp[16];
4351
4352 if (len >= sizeof(tmp))
4353 return -EINVAL;
4354
4355 if (copy_from_user(tmp, ubuf, len))
4356 return -EFAULT;
4357
4358 tmp[len] = '\0';
4359
4360 /* Strip newline, if any */
4361 newline = strchr(tmp, '\n');
4362 if (newline)
4363 *newline = '\0';
4364
4365 if (strcmp(tmp, "reset") == 0)
4366 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4367 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4368 return -EINVAL;
4369
4370 if (new_threshold > 0)
4371 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4372 new_threshold);
4373 else
4374 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4375
4376 spin_lock_irq(&dev_priv->irq_lock);
4377 hotplug->hpd_storm_threshold = new_threshold;
4378 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4379 for_each_hpd_pin(i)
4380 hotplug->stats[i].count = 0;
4381 spin_unlock_irq(&dev_priv->irq_lock);
4382
4383 /* Re-enable hpd immediately if we were in an irq storm */
4384 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4385
4386 return len;
4387}
4388
4389static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4390{
4391 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4392}
4393
4394static const struct file_operations i915_hpd_storm_ctl_fops = {
4395 .owner = THIS_MODULE,
4396 .open = i915_hpd_storm_ctl_open,
4397 .read = seq_read,
4398 .llseek = seq_lseek,
4399 .release = single_release,
4400 .write = i915_hpd_storm_ctl_write
4401};
4402
9a64c650
LP
4403static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4404{
4405 struct drm_i915_private *dev_priv = m->private;
4406
4407 seq_printf(m, "Enabled: %s\n",
4408 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4409
4410 return 0;
4411}
4412
4413static int
4414i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4415{
4416 return single_open(file, i915_hpd_short_storm_ctl_show,
4417 inode->i_private);
4418}
4419
4420static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4421 const char __user *ubuf,
4422 size_t len, loff_t *offp)
4423{
4424 struct seq_file *m = file->private_data;
4425 struct drm_i915_private *dev_priv = m->private;
4426 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4427 char *newline;
4428 char tmp[16];
4429 int i;
4430 bool new_state;
4431
4432 if (len >= sizeof(tmp))
4433 return -EINVAL;
4434
4435 if (copy_from_user(tmp, ubuf, len))
4436 return -EFAULT;
4437
4438 tmp[len] = '\0';
4439
4440 /* Strip newline, if any */
4441 newline = strchr(tmp, '\n');
4442 if (newline)
4443 *newline = '\0';
4444
4445 /* Reset to the "default" state for this system */
4446 if (strcmp(tmp, "reset") == 0)
4447 new_state = !HAS_DP_MST(dev_priv);
4448 else if (kstrtobool(tmp, &new_state) != 0)
4449 return -EINVAL;
4450
4451 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4452 new_state ? "En" : "Dis");
4453
4454 spin_lock_irq(&dev_priv->irq_lock);
4455 hotplug->hpd_short_storm_enabled = new_state;
4456 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4457 for_each_hpd_pin(i)
4458 hotplug->stats[i].count = 0;
4459 spin_unlock_irq(&dev_priv->irq_lock);
4460
4461 /* Re-enable hpd immediately if we were in an irq storm */
4462 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4463
4464 return len;
4465}
4466
4467static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4468 .owner = THIS_MODULE,
4469 .open = i915_hpd_short_storm_ctl_open,
4470 .read = seq_read,
4471 .llseek = seq_lseek,
4472 .release = single_release,
4473 .write = i915_hpd_short_storm_ctl_write,
4474};
4475
35954e88
R
4476static int i915_drrs_ctl_set(void *data, u64 val)
4477{
4478 struct drm_i915_private *dev_priv = data;
4479 struct drm_device *dev = &dev_priv->drm;
138bdac8 4480 struct intel_crtc *crtc;
35954e88
R
4481
4482 if (INTEL_GEN(dev_priv) < 7)
4483 return -ENODEV;
4484
138bdac8
ML
4485 for_each_intel_crtc(dev, crtc) {
4486 struct drm_connector_list_iter conn_iter;
4487 struct intel_crtc_state *crtc_state;
4488 struct drm_connector *connector;
4489 struct drm_crtc_commit *commit;
4490 int ret;
4491
4492 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4493 if (ret)
4494 return ret;
4495
4496 crtc_state = to_intel_crtc_state(crtc->base.state);
4497
4498 if (!crtc_state->base.active ||
4499 !crtc_state->has_drrs)
4500 goto out;
35954e88 4501
138bdac8
ML
4502 commit = crtc_state->base.commit;
4503 if (commit) {
4504 ret = wait_for_completion_interruptible(&commit->hw_done);
4505 if (ret)
4506 goto out;
4507 }
4508
4509 drm_connector_list_iter_begin(dev, &conn_iter);
4510 drm_for_each_connector_iter(connector, &conn_iter) {
4511 struct intel_encoder *encoder;
4512 struct intel_dp *intel_dp;
4513
4514 if (!(crtc_state->base.connector_mask &
4515 drm_connector_mask(connector)))
4516 continue;
4517
4518 encoder = intel_attached_encoder(connector);
35954e88
R
4519 if (encoder->type != INTEL_OUTPUT_EDP)
4520 continue;
4521
4522 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4523 val ? "en" : "dis", val);
4524
4525 intel_dp = enc_to_intel_dp(&encoder->base);
4526 if (val)
4527 intel_edp_drrs_enable(intel_dp,
138bdac8 4528 crtc_state);
35954e88
R
4529 else
4530 intel_edp_drrs_disable(intel_dp,
138bdac8 4531 crtc_state);
35954e88 4532 }
138bdac8
ML
4533 drm_connector_list_iter_end(&conn_iter);
4534
4535out:
4536 drm_modeset_unlock(&crtc->base.mutex);
4537 if (ret)
4538 return ret;
35954e88 4539 }
35954e88
R
4540
4541 return 0;
4542}
4543
4544DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4545
d52ad9cb
ML
4546static ssize_t
4547i915_fifo_underrun_reset_write(struct file *filp,
4548 const char __user *ubuf,
4549 size_t cnt, loff_t *ppos)
4550{
4551 struct drm_i915_private *dev_priv = filp->private_data;
4552 struct intel_crtc *intel_crtc;
4553 struct drm_device *dev = &dev_priv->drm;
4554 int ret;
4555 bool reset;
4556
4557 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4558 if (ret)
4559 return ret;
4560
4561 if (!reset)
4562 return cnt;
4563
4564 for_each_intel_crtc(dev, intel_crtc) {
4565 struct drm_crtc_commit *commit;
4566 struct intel_crtc_state *crtc_state;
4567
4568 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4569 if (ret)
4570 return ret;
4571
4572 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4573 commit = crtc_state->base.commit;
4574 if (commit) {
4575 ret = wait_for_completion_interruptible(&commit->hw_done);
4576 if (!ret)
4577 ret = wait_for_completion_interruptible(&commit->flip_done);
4578 }
4579
4580 if (!ret && crtc_state->base.active) {
4581 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4582 pipe_name(intel_crtc->pipe));
4583
4584 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4585 }
4586
4587 drm_modeset_unlock(&intel_crtc->base.mutex);
4588
4589 if (ret)
4590 return ret;
4591 }
4592
4593 ret = intel_fbc_reset_underrun(dev_priv);
4594 if (ret)
4595 return ret;
4596
4597 return cnt;
4598}
4599
/* fops for the write-only i915_fifo_underrun_reset debugfs file. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4606
/*
 * Read-only informational debugfs files, registered in bulk via
 * drm_debugfs_create_files().  Each entry is {name, show callback, flags}
 * with an optional driver-private cookie (see i915_guc_load_err_log_dump).
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* Same show callback; the (void *)1 cookie selects the error log. */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2017263e 4656
/*
 * Read/write debugfs files with dedicated file_operations, created one by
 * one in i915_debugfs_register().  Entries inside the #if are only built
 * when error capture support is configured.
 */
static const struct i915_debugfs_files {
	const char *name;	/* debugfs file name */
	const struct file_operations *fops;	/* backing fops */
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4684
1dac891c 4685int i915_debugfs_register(struct drm_i915_private *dev_priv)
2017263e 4686{
91c8a326 4687 struct drm_minor *minor = dev_priv->drm.primary;
b05eeb0f 4688 struct dentry *ent;
6cc42152 4689 int i;
f3cd474b 4690
b05eeb0f
NT
4691 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4692 minor->debugfs_root, to_i915(minor->dev),
4693 &i915_forcewake_fops);
4694 if (!ent)
4695 return -ENOMEM;
6a9c308d 4696
34b9674c 4697 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
b05eeb0f
NT
4698 ent = debugfs_create_file(i915_debugfs_files[i].name,
4699 S_IRUGO | S_IWUSR,
4700 minor->debugfs_root,
4701 to_i915(minor->dev),
34b9674c 4702 i915_debugfs_files[i].fops);
b05eeb0f
NT
4703 if (!ent)
4704 return -ENOMEM;
34b9674c 4705 }
40633219 4706
27c202ad
BG
4707 return drm_debugfs_create_files(i915_debugfs_list,
4708 I915_DEBUGFS_ENTRIES,
2017263e
BG
4709 minor->debugfs_root, minor);
4710}
4711
aa7471d2
JN
/* Describes one contiguous DPCD register range to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

/* The DPCD ranges dumped by the i915_dpcd debugfs file, in dump order. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4735
4736static int i915_dpcd_show(struct seq_file *m, void *data)
4737{
4738 struct drm_connector *connector = m->private;
4739 struct intel_dp *intel_dp =
4740 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
e5315213 4741 u8 buf[16];
aa7471d2
JN
4742 ssize_t err;
4743 int i;
4744
5c1a8875
MK
4745 if (connector->status != connector_status_connected)
4746 return -ENODEV;
4747
aa7471d2
JN
4748 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4749 const struct dpcd_block *b = &i915_dpcd_debug[i];
4750 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4751
4752 if (b->edp &&
4753 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4754 continue;
4755
4756 /* low tech for now */
4757 if (WARN_ON(size > sizeof(buf)))
4758 continue;
4759
4760 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
65404c89
CW
4761 if (err < 0)
4762 seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4763 else
4764 seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
b3f9d7d7 4765 }
aa7471d2
JN
4766
4767 return 0;
4768}
e4006713 4769DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
aa7471d2 4770
ecbd6781
DW
4771static int i915_panel_show(struct seq_file *m, void *data)
4772{
4773 struct drm_connector *connector = m->private;
4774 struct intel_dp *intel_dp =
4775 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4776
4777 if (connector->status != connector_status_connected)
4778 return -ENODEV;
4779
4780 seq_printf(m, "Panel power up delay: %d\n",
4781 intel_dp->panel_power_up_delay);
4782 seq_printf(m, "Panel power down delay: %d\n",
4783 intel_dp->panel_power_down_delay);
4784 seq_printf(m, "Backlight on delay: %d\n",
4785 intel_dp->backlight_on_delay);
4786 seq_printf(m, "Backlight off delay: %d\n",
4787 intel_dp->backlight_off_delay);
4788
4789 return 0;
4790}
e4006713 4791DEFINE_SHOW_ATTRIBUTE(i915_panel);
ecbd6781 4792
bdc93fe0
R
4793static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4794{
4795 struct drm_connector *connector = m->private;
4796 struct intel_connector *intel_connector = to_intel_connector(connector);
4797
4798 if (connector->status != connector_status_connected)
4799 return -ENODEV;
4800
4801 /* HDCP is supported by connector */
d3dacc70 4802 if (!intel_connector->hdcp.shim)
bdc93fe0
R
4803 return -EINVAL;
4804
4805 seq_printf(m, "%s:%d HDCP version: ", connector->name,
4806 connector->base.id);
4807 seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4808 "None" : "HDCP1.4");
4809 seq_puts(m, "\n");
4810
4811 return 0;
4812}
4813DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4814
e845f099
MN
/*
 * i915_dsc_fec_support_show - report DSC/FEC state for the connector's CRTC.
 *
 * Takes the connection_mutex and the CRTC lock under an interruptible
 * acquire context.  On -EDEADLK either lock must be released via
 * drm_modeset_backoff() before retrying (never retry without backing off,
 * or the w/w mutex state is corrupted); any other error aborts.
 *
 * Prints DSC enable/sink-support, plus FEC sink support for non-eDP.
 * Returns -ENODEV when the connector is disconnected or has no CRTC.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		/* Reset each pass; only a successful backoff re-arms it. */
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: back off (dropping held locks) and retry. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Same backoff-then-retry dance for the CRTC lock. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC is only relevant for external DP sinks, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4871
4872static ssize_t i915_dsc_fec_support_write(struct file *file,
4873 const char __user *ubuf,
4874 size_t len, loff_t *offp)
4875{
4876 bool dsc_enable = false;
4877 int ret;
4878 struct drm_connector *connector =
4879 ((struct seq_file *)file->private_data)->private;
4880 struct intel_encoder *encoder = intel_attached_encoder(connector);
4881 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4882
4883 if (len == 0)
4884 return 0;
4885
4886 DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4887 len);
4888
4889 ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4890 if (ret < 0)
4891 return ret;
4892
4893 DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4894 (dsc_enable) ? "true" : "false");
4895 intel_dp->force_dsc_en = dsc_enable;
4896
4897 *offp += len;
4898 return len;
4899}
4900
4901static int i915_dsc_fec_support_open(struct inode *inode,
4902 struct file *file)
4903{
4904 return single_open(file, i915_dsc_fec_support_show,
4905 inode->i_private);
4906}
4907
/* seq_file-backed fops for i915_dsc_fec_support: readable dump + bool write. */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4916
aa7471d2
JN
4917/**
4918 * i915_debugfs_connector_add - add i915 specific connector debugfs files
4919 * @connector: pointer to a registered drm_connector
4920 *
4921 * Cleanup will be done by drm_connector_unregister() through a call to
4922 * drm_debugfs_connector_remove().
4923 *
4924 * Returns 0 on success, negative error codes on error.
4925 */
4926int i915_debugfs_connector_add(struct drm_connector *connector)
4927{
4928 struct dentry *root = connector->debugfs_entry;
e845f099 4929 struct drm_i915_private *dev_priv = to_i915(connector->dev);
aa7471d2
JN
4930
4931 /* The connector must have been registered beforehands. */
4932 if (!root)
4933 return -ENODEV;
4934
4935 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4936 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
ecbd6781
DW
4937 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4938 connector, &i915_dpcd_fops);
4939
5b7b3086 4940 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
ecbd6781
DW
4941 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4942 connector, &i915_panel_fops);
5b7b3086
DP
4943 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4944 connector, &i915_psr_sink_status_fops);
4945 }
aa7471d2 4946
bdc93fe0
R
4947 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4948 connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4949 connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4950 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4951 connector, &i915_hdcp_sink_capability_fops);
4952 }
4953
e845f099
MN
4954 if (INTEL_GEN(dev_priv) >= 10 &&
4955 (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4956 connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4957 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4958 connector, &i915_dsc_fec_support_fops);
4959
aa7471d2
JN
4960 return 0;
4961}
This page took 2.700818 seconds and 4 git commands to generate.