/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

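/*
 * Validate a forcewake range table: each entry must be well-formed (start
 * strictly below end), the entries must be sorted in ascending order, and a
 * "watertight" table must leave no gaps between consecutive ranges.
 */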
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		if (is_watertight && (prev + 1) != (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the table never goes backwards */
		if (prev >= (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the entry is valid */
		if (ranges->start >= ranges->end) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}

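/*
 * Sanity check the per-platform shadowed register lists: each range's end
 * must not precede its start, the ranges must be sorted in ascending order,
 * and every start offset must be dword aligned.
 */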
static int intel_shadow_table_check(void)
{
	struct {
		const struct i915_range *regs;
		unsigned int size;
	} range_lists[] = {
		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
		{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
		{ pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
		{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
		{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
	};
	const struct i915_range *range;
	unsigned int i, j;
	s32 prev;

	for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
		range = range_lists[j].regs;
		for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
			if (range->end < range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			if (prev >= (s32)range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
				       __func__, i, range->start, range->end, prev);
				return -EINVAL;
			}

			if (range->start % 4) {
				pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			prev = range->end;
		}
	}

	return 0;
}

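/*
 * The mock selftests only walk the static forcewake and shadowed register
 * tables and so run without touching the hardware.
 */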
int intel_uncore_mock_selftests(void)
{
	struct {
		const struct intel_forcewake_range *ranges;
		unsigned int num_ranges;
		bool is_watertight;
	} fw[] = {
		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
		{ __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
		{ __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
		{ __mtl_fw_ranges, ARRAY_SIZE(__mtl_fw_ranges), true },
		{ __xelpmp_fw_ranges, ARRAY_SIZE(__xelpmp_fw_ranges), true },
	};
	int err, i;

	for (i = 0; i < ARRAY_SIZE(fw); i++) {
		err = intel_fw_table_check(fw[i].ranges,
					   fw[i].num_ranges,
					   fw[i].is_watertight);
		if (err)
			return err;
	}

	err = intel_shadow_table_check();
	if (err)
		return err;

	return 0;
}

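/*
 * Exercise manual forcewake handling: take and release the forcewake domains
 * around reads of a register that is only readable while its power well is
 * awake, and check the value tracks the expected wake state (non-zero while
 * forcewake is held, zero once the well has gone back to sleep).
 */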
static int live_forcewake_ops(void *arg)
{
	static const struct reg {
		const char *name;
		u8 min_graphics_ver;
		u8 max_graphics_ver;
		unsigned int offset;
	} registers[] = {
		{
			"RING_START",
			6, 7,
			0x38,
		},
		{
			"RING_MI_MODE",
			8, U8_MAX,
			0x9c,
		}
	};
	const struct reg *r;
	struct intel_gt *gt = arg;
	struct intel_uncore_forcewake_domain *domain;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	int err = 0;

	GEM_BUG_ON(gt->awake);

	/* vlv/chv with their pcu behave differently wrt reads */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
		pr_debug("PCU fakes forcewake badly; skipping\n");
		return 0;
	}

	/*
	 * Not quite as reliable across the gen as one would hope.
	 *
	 * Either our theory of operation is incorrect, or there remain
	 * external parties interfering with the powerwells.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* We have to pick carefully to get the exact behaviour we need */
	for (r = registers; r->name; r++)
		if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
			break;
	if (!r->name) {
		pr_debug("Forcewaked register not known for %s; skipping\n",
			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
		return 0;
	}

	wakeref = intel_runtime_pm_get(uncore->rpm);

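	/* Flush any pending delayed forcewake releases so all domains idle */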
	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
		if (!hrtimer_cancel(&domain->timer))
			continue;

		intel_uncore_fw_release_timer(&domain->timer);
	}

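	/*
	 * For each engine, sample the chosen register with its forcewake
	 * domains explicitly held and again after releasing them: the value
	 * must be non-zero while awake and must read back as zero asleep.
	 */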
	for_each_engine(engine, gt, id) {
		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
		u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
		enum forcewake_domains fw_domains;
		u32 val;

		if (!engine->default_state)
			continue;

		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
							    FW_REG_READ);
		if (!fw_domains)
			continue;

		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			if (!domain->wake_count)
				continue;

			pr_err("fw_domain %s still active, aborting test!\n",
			       intel_uncore_forcewake_domain_to_str(domain->id));
			err = -EINVAL;
			goto out_rpm;
		}

		intel_uncore_forcewake_get(uncore, fw_domains);
		val = readl(reg);
		intel_uncore_forcewake_put(uncore, fw_domains);

		/* Flush the forcewake release (delayed onto a timer) */
		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer))
				intel_uncore_fw_release_timer(&domain->timer);

			preempt_disable();
			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
			preempt_enable();
			if (err) {
				pr_err("Failed to clear fw_domain %s\n",
				       intel_uncore_forcewake_domain_to_str(domain->id));
				goto out_rpm;
			}
		}

		if (!val) {
			pr_err("%s:%s was zero while fw was held!\n",
			       engine->name, r->name);
			err = -EINVAL;
			goto out_rpm;
		}

		/* We then expect the read to return 0 outside of the fw */
		if (wait_for(readl(reg) == 0, 100)) {
			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
			       engine->name, r->name, readl(reg), fw_domains);
			err = -ETIMEDOUT;
			goto out_rpm;
		}
	}

out_rpm:
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return err;
}

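/*
 * With every forcewake domain held, record which offsets in the low mmio
 * range can be read without tripping the unclaimed-access detection, then
 * re-read them after a forcewake reset and flag any that now report an
 * unclaimed access.
 */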
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;
	u32 offset;
	int err;

	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lock up the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		check_for_unclaimed_mmio(uncore);

		intel_uncore_posting_read_fw(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}

static int live_fw_table(void *arg)
{
	struct intel_gt *gt = arg;

	/* Confirm the table we load is still valid */
	return intel_fw_table_check(gt->uncore->fw_domains_table,
				    gt->uncore->fw_domains_table_entries,
				    GRAPHICS_VER(gt->i915) >= 9);
}

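/* The live selftests touch the hardware and run against the primary GT. */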
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_fw_table),
		SUBTEST(live_forcewake_ops),
		SUBTEST(live_forcewake_domains),
	};

	return intel_gt_live_subtests(tests, to_gt(i915));
}