// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"

#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

#define COUNT 5

static int cmp_u32(const void *A, const void *B)
{
	const u32 *a = A, *b = B;

	return *a - *b;
}

static void perf_begin(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);

	/* Boost gpufreq to max [waitboost] and keep it fixed */
	atomic_inc(&gt->rps.num_waiters);
	schedule_work(&gt->rps.work);
	flush_work(&gt->rps.work);
}

static int perf_end(struct intel_gt *gt)
{
	atomic_dec(&gt->rps.num_waiters);
	intel_gt_pm_put(gt);

	return igt_flush_test(gt->i915);
}

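/*
 * write_timestamp() samples the engine's free-running CS_TIMESTAMP register
 * from the command streamer itself, storing the value into the given u32
 * slot of the request's timeline hardware status page (HWSP). Reading two
 * slots back from the CPU afterwards gives the number of timestamp ticks
 * the CS spent between the two sample points. Roughly, the emitted packet is
 *	[ SRM cmd | RING_TIMESTAMP reg | HWSP GGTT address (+ high dword) ]
 * with the extra address dword only consumed on Gen8+.
 */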
static int write_timestamp(struct i915_request *rq, int slot)
{
	struct intel_timeline *tl =
		rcu_dereference_protected(rq->timeline,
					  !i915_request_signaled(rq));
	u32 cmd;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (GRAPHICS_VER(rq->engine->i915) >= 8)
		cmd++; /* Gen8+ SRM carries an extra (high) address dword */
	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = tl->hwsp_offset + slot * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

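/*
 * create_empty_batch() builds the smallest possible batch buffer: a single
 * MI_BATCH_BUFFER_END in its own page, pinned into the context's VM. Jumping
 * into it with MI_BATCH_BUFFER_START therefore measures only the cost of
 * entering and leaving a batch, with no payload.
 */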
static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	cs[0] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

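/*
 * trifilter() reduces COUNT noisy samples to one value: the samples are
 * sorted and a weighted median is taken, e.g. for COUNT == 5 the result is
 * approximately (a[1] + 2 * a[2] + a[3]) / 4, discarding the two outliers.
 */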
static u32 trifilter(u32 *a)
{
	u64 sum;

	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);

	sum = mul_u32_u32(a[2], 2);
	sum += a[1];
	sum += a[3];

	return sum >> 2;
}

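/*
 * perf_mi_bb_start() times how long the command streamer takes to execute
 * MI_BATCH_BUFFER_START into an empty batch: CS_TIMESTAMP is written to HWSP
 * slot 2, the empty batch is run, and CS_TIMESTAMP is written to slot 3. The
 * reported value is the trifiltered slot3 - slot2 delta in timestamp ticks,
 * measured on each engine with the GPU held at its waitboost frequency
 * (see perf_begin/perf_end).
 */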
static int perf_mi_bb_start(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *batch;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		batch = create_empty_batch(ce);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(batch);
		if (err) {
			intel_engine_pm_put(engine);
			i915_vma_put(batch);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							batch->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
		}
		i915_vma_put(batch);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: MI_BB_START cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

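/*
 * create_nop_batch() builds a 64KiB batch that is all zeroes, i.e. all
 * MI_NOOP (opcode 0), with the final dword replaced by MI_BATCH_BUFFER_END.
 * That gives just under 16K no-op instructions for the CS to chew through.
 */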
static struct i915_vma *create_nop_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	memset(cs, 0, SZ_64K);
	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

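/*
 * perf_mi_noop() estimates the cost of executing ~16K MI_NOOPs. Three
 * timestamps are taken: slot 2 before an empty batch, slot 3 between the
 * empty batch and the nop batch, and slot 4 after the nop batch. The
 * (slot4 - slot3) time covers MI_BB_START + the NOOPs + MI_BATCH_BUFFER_END,
 * and subtracting the (slot3 - slot2) empty-batch baseline leaves the cost
 * of the NOOPs themselves.
 */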
static int perf_mi_noop(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (GRAPHICS_VER(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *base, *nop;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		base = create_empty_batch(ce);
		if (IS_ERR(base)) {
			err = PTR_ERR(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(base);
		if (err) {
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		nop = create_nop_batch(ce);
		if (IS_ERR(nop)) {
			err = PTR_ERR(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(nop);
		if (err) {
			i915_vma_put(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							base->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							nop->node.start,
							nop->node.size,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 4);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] =
				(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
				(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
		}
		i915_vma_put(nop);
		i915_vma_put(base);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: 16K MI_NOOP cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_mi_bb_start),
		SUBTEST(perf_mi_noop),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}

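/*
 * intel_mmio_bases_check() is a mock (hardware-less) selftest that walks the
 * static intel_engines[] table and verifies each engine's mmio_bases[] list:
 * entries must be ordered by strictly descending graphics_ver, every
 * populated entry must have a non-zero mmio base, and a graphics_ver of 0
 * terminates the list.
 */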
static int intel_mmio_bases_check(void *arg)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		const struct engine_info *info = &intel_engines[i];
		u8 prev = U8_MAX;

		for (j = 0; j < MAX_MMIO_BASES; j++) {
			u8 ver = info->mmio_bases[j].graphics_ver;
			u32 base = info->mmio_bases[j].base;

			if (ver >= prev) {
				pr_err("%s(%s, class:%d, instance:%d): mmio base for graphics ver %u is before the one for ver %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       prev, ver);
				return -EINVAL;
			}

			if (ver == 0)
				break;

			if (!base) {
				pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for graphics ver %u at entry %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       base, ver, j);
				return -EINVAL;
			}

			prev = ver;
		}

		pr_debug("%s: min graphics version supported for %s%d is %u\n",
			 __func__,
			 intel_engine_class_repr(info->class),
			 info->instance,
			 prev);
	}

	return 0;
}

int intel_engine_cs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(intel_mmio_bases_check),
	};

	return i915_subtests(tests, NULL);
}