// SPDX-License-Identifier: MIT
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/crc32.h>

#include "gem/i915_gem_stolen.h"

#include "i915_memcpy.h"
#include "i915_selftest.h"
#include "intel_gpu_commands.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_atomic.h"
#include "selftests/igt_spinner.h"
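
/*
 * __igt_reset_stolen: check that a GPU reset does not scribble over
 * unreserved stolen memory. We CRC every page of the stolen region,
 * trigger a reset (device-wide or per-engine, according to @mask),
 * then re-read and flag any unallocated page whose contents changed.
 */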
static int
__igt_reset_stolen(struct intel_gt *gt,
		   intel_engine_mask_t mask,
		   const char *msg)
{
	struct i915_ggtt *ggtt = gt->ggtt;
	const struct resource *dsm = &gt->i915->dsm.stolen;
	resource_size_t num_pages, page;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;
	struct igt_spinner spin;
	long max = -1;
	long count = 0;
	void *tmp;
	u32 *crc;
	int err;

	/* We reuse the GGTT error-capture slot to map each stolen page. */
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return 0;

	num_pages = resource_size(dsm) >> PAGE_SHIFT;
	if (!num_pages)
		return 0;

	crc = kmalloc_array(num_pages, sizeof(u32), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto err_crc;
	}

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	err = igt_spinner_init(&spin, gt);
	if (err)
		goto err_lock;
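
	/*
	 * Keep each engine busy with a spinning request so that the reset
	 * is exercised against an active GPU, not an idle one.
	 */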
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct i915_request *rq;

		if (!(mask & engine->mask))
			continue;

		if (!intel_engine_can_store_dword(engine))
			continue;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto err_spin;
		}
		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
		intel_context_put(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_spin;
		}
		i915_request_add(rq);
	}
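
	/*
	 * First pass: map each stolen page through the error-capture PTE,
	 * poison the pages that no one has claimed, and record a CRC of
	 * every page so we can tell afterwards what the reset touched.
	 */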
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(gt->i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		/* Fill unallocated pages with a known poison value. */
		if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1))
			memset_io(s, STACK_MAGIC, PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		crc[page] = crc32_le(0, in, PAGE_SIZE);

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
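
	/* Now trigger the reset under test: full device or per engine. */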
	if (mask == ALL_ENGINES) {
		intel_gt_reset(gt, mask, NULL);
	} else {
		for_each_engine(engine, gt, id) {
			if (mask & engine->mask)
				intel_engine_reset(engine, NULL);
		}
	}
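
	/*
	 * Second pass: re-read every page and compare CRCs. A changed page
	 * that is not tracked in the stolen allocator means the reset wrote
	 * somewhere it should not have.
	 */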
	for (page = 0; page < num_pages; page++) {
		dma_addr_t dma = (dma_addr_t)dsm->start + (page << PAGE_SHIFT);
		void __iomem *s;
		void *in;
		u32 x;

		ggtt->vm.insert_page(&ggtt->vm, dma,
				     ggtt->error_capture.start,
				     i915_gem_get_pat_index(gt->i915,
							    I915_CACHE_NONE),
				     0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);

		in = (void __force *)s;
		if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE))
			in = tmp;
		x = crc32_le(0, in, PAGE_SIZE);

		if (x != crc[page] &&
		    !__drm_mm_interval_first(&gt->i915->mm.stolen,
					     page << PAGE_SHIFT,
					     ((page + 1) << PAGE_SHIFT) - 1)) {
			pr_debug("unused stolen page %pa modified by GPU reset\n",
				 &page);
			if (count++ == 0)
				igt_hexdump(in, PAGE_SIZE);
			max = page;
		}

		io_mapping_unmap(s);
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);

	if (count > 0) {
		pr_info("%s reset clobbered %ld pages of stolen, last clobber at page %ld\n",
			msg, count, max);
	}
	if (max >= I915_GEM_STOLEN_BIAS >> PAGE_SHIFT) {
		pr_err("%s reset clobbered unreserved area [above %x] of stolen; may cause severe faults\n",
		       msg, I915_GEM_STOLEN_BIAS);
		err = -EINVAL;
	}

err_spin:
	igt_spinner_fini(&spin);

err_lock:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	kfree(tmp);
err_crc:
	kfree(crc);
	return err;
}
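
/*
 * Thin wrappers: run the stolen-memory check once for a full device
 * reset, and once per engine for individual engine resets.
 */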

static int igt_reset_device_stolen(void *arg)
{
	return __igt_reset_stolen(arg, ALL_ENGINES, "device");
}

static int igt_reset_engines_stolen(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (!intel_has_reset_engine(gt))
		return 0;

	for_each_engine(engine, gt, id) {
		err = __igt_reset_stolen(gt, engine->mask, engine->name);
		if (err)
			return err;
	}

	return 0;
}

static int igt_global_reset(void *arg)
{
	struct intel_gt *gt = arg;
	unsigned int reset_count;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we can issue a global GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	reset_count = i915_reset_count(&gt->i915->gpu_error);

	intel_gt_reset(gt, ALL_ENGINES, NULL);

	if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
		pr_err("No GPU reset recorded!\n");
		err = -EINVAL;
	}

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	if (intel_gt_is_wedged(gt))
		err = -EIO;

	return err;
}

static int igt_wedged_reset(void *arg)
{
	struct intel_gt *gt = arg;
	intel_wakeref_t wakeref;

	/* Check that we can recover a wedged device with a GPU reset */

	igt_global_reset_lock(gt);
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	intel_gt_set_wedged(gt);

	GEM_BUG_ON(!intel_gt_is_wedged(gt));
	intel_gt_reset(gt, ALL_ENGINES, NULL);

	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	igt_global_reset_unlock(gt);

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}
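
/*
 * igt_atomic_phases (from selftests/igt_atomic.h) walks the reset through
 * a series of atomic contexts (e.g. irqs, softirqs or preemption disabled)
 * to check that the reset paths never sleep.
 */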

static int igt_atomic_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	wakeref = intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto unlock;

	for (p = igt_atomic_phases; p->name; p++) {
		intel_engine_mask_t awake;

		GEM_TRACE("__intel_gt_reset under %s\n", p->name);

		awake = reset_prepare(gt);
		p->critical_section_begin();

		err = intel_gt_reset_all_engines(gt);

		p->critical_section_end();
		reset_finish(gt, awake);

		if (err) {
			pr_err("__intel_gt_reset failed under %s\n", p->name);
			break;
		}
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt, wakeref);

	return err;
}
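
/*
 * Repeat the atomic-context check for individual engine resets. Skipped
 * under GuC submission, where engine resets are owned by the GuC rather
 * than issued directly by the driver.
 */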

static int igt_atomic_engine_reset(void *arg)
{
	struct intel_gt *gt = arg;
	const typeof(*igt_atomic_phases) *p;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that the resets are usable from atomic context */

	if (!intel_has_reset_engine(gt))
		return 0;

	if (intel_uc_uses_guc_submission(&gt->uc))
		return 0;

	wakeref = intel_gt_pm_get(gt);
	igt_global_reset_lock(gt);

	/* Flush any requests before we get started and check basics */
	if (!igt_force_reset(gt))
		goto out_unlock;

	for_each_engine(engine, gt, id) {
		struct tasklet_struct *t = &engine->sched_engine->tasklet;

		/* Keep the submission tasklet quiet while we poke at reset. */
		if (t->func)
			tasklet_disable(t);
		intel_engine_pm_get(engine);

		for (p = igt_atomic_phases; p->name; p++) {
			GEM_TRACE("intel_engine_reset(%s) under %s\n",
				  engine->name, p->name);
			if (strcmp(p->name, "softirq"))
				local_bh_disable();

			p->critical_section_begin();
			err = __intel_engine_reset_bh(engine, NULL);
			p->critical_section_end();

			if (strcmp(p->name, "softirq"))
				local_bh_enable();

			if (err) {
				pr_err("intel_engine_reset(%s) failed under %s\n",
				       engine->name, p->name);
				break;
			}
		}

		intel_engine_pm_put(engine);
		if (t->func) {
			tasklet_enable(t);
			tasklet_hi_schedule(t);
		}
		if (err)
			break;
	}

	/* As we poke around the guts, do a full reset before continuing. */
	igt_force_reset(gt);

out_unlock:
	igt_global_reset_unlock(gt);
	intel_gt_pm_put(gt, wakeref);

	return err;
}
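
/*
 * Live selftest entry point: igt_global_reset runs first since it doubles
 * as an attempt to recover the GPU before the more invasive tests.
 */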

int intel_reset_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
		SUBTEST(igt_reset_device_stolen),
		SUBTEST(igt_reset_engines_stolen),
		SUBTEST(igt_wedged_reset),
		SUBTEST(igt_atomic_reset),
		SUBTEST(igt_atomic_engine_reset),
	};
	struct intel_gt *gt = to_gt(i915);

	if (!intel_has_gpu_reset(gt))
		return 0;

	if (intel_gt_is_wedged(gt))
		return -EIO; /* we're long past hope of a successful reset */

	return intel_gt_live_subtests(tests, gt);
}