/*
 * SPDX-License-Identifier: GPL-2.0
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/sort.h>

#include "intel_gpu_commands.h"
#include "intel_gt_pm.h"
#include "intel_rps.h"

#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"

#define COUNT 5

static int cmp_u32(const void *A, const void *B)
{
	const u32 *a = A, *b = B;

	return *a - *b;
}

static void perf_begin(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);

	/* Boost gpufreq to max [waitboost] and keep it fixed */
	atomic_inc(&gt->rps.num_waiters);
	schedule_work(&gt->rps.work);
	flush_work(&gt->rps.work);
}

static int perf_end(struct intel_gt *gt)
{
	atomic_dec(&gt->rps.num_waiters);
	intel_gt_pm_put(gt);

	return igt_flush_test(gt->i915);
}

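/*
 * Store the engine's CS_TIMESTAMP register into the given dword slot of
 * the request's HWSP with MI_STORE_REGISTER_MEM, so the CPU can later
 * read back when the command streamer reached this point in the ring.
 */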
static int write_timestamp(struct i915_request *rq, int slot)
{
	u32 cmd;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (INTEL_GEN(rq->engine->i915) >= 8)
		cmd++;
	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}

static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	cs[0] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

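/*
 * Reduce the COUNT samples to a single value: sort them, drop the
 * minimum and maximum, and return a weighted average of the remaining
 * three with the median counted twice, to damp outliers.
 */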
static u32 trifilter(u32 *a)
{
	u64 sum;

	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);

	sum = mul_u32_u32(a[2], 2);
	sum += a[1];
	sum += a[3];

	return sum >> 2;
}

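/*
 * Estimate the cost of MI_BB_START on each engine by recording
 * CS_TIMESTAMP immediately before and after launching an empty batch
 * from the kernel context's ring.
 */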
static int perf_mi_bb_start(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *batch;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		batch = create_empty_batch(ce);
		if (IS_ERR(batch)) {
			err = PTR_ERR(batch);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(batch);
		if (err) {
			intel_engine_pm_put(engine);
			i915_vma_put(batch);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							batch->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
		}
		i915_vma_put(batch);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: MI_BB_START cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

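/*
 * Build a 64KiB batch consisting entirely of MI_NOOP (zero dwords),
 * terminated by MI_BATCH_BUFFER_END in the final dword.
 */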
static struct i915_vma *create_nop_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	memset(cs, 0, SZ_64K);
	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

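/*
 * Estimate the cost of executing 16K MI_NOOP: time an empty batch and a
 * 64KiB batch of MI_NOOP back to back, then subtract the empty-batch
 * time to cancel out the MI_BB_START/MI_BATCH_BUFFER_END overhead.
 */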
static int perf_mi_noop(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
		return 0;

	perf_begin(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce = engine->kernel_context;
		struct i915_vma *base, *nop;
		u32 cycles[COUNT];
		int i;

		intel_engine_pm_get(engine);

		base = create_empty_batch(ce);
		if (IS_ERR(base)) {
			err = PTR_ERR(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(base);
		if (err) {
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		nop = create_nop_batch(ce);
		if (IS_ERR(nop)) {
			err = PTR_ERR(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		err = i915_vma_sync(nop);
		if (err) {
			i915_vma_put(nop);
			i915_vma_put(base);
			intel_engine_pm_put(engine);
			break;
		}

		for (i = 0; i < ARRAY_SIZE(cycles); i++) {
			struct i915_request *rq;

			rq = i915_request_create(ce);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				break;
			}

			err = write_timestamp(rq, 2);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							base->node.start, 8,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 3);
			if (err)
				goto out;

			err = rq->engine->emit_bb_start(rq,
							nop->node.start,
							nop->node.size,
							0);
			if (err)
				goto out;

			err = write_timestamp(rq, 4);
			if (err)
				goto out;

out:
			i915_request_get(rq);
			i915_request_add(rq);

			if (i915_request_wait(rq, 0, HZ / 5) < 0)
				err = -EIO;
			i915_request_put(rq);
			if (err)
				break;

			cycles[i] =
				(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
				(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
		}
		i915_vma_put(nop);
		i915_vma_put(base);
		intel_engine_pm_put(engine);
		if (err)
			break;

		pr_info("%s: 16K MI_NOOP cycles: %u\n",
			engine->name, trifilter(cycles));
	}
	if (perf_end(gt))
		err = -EIO;

	return err;
}

int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_mi_bb_start),
		SUBTEST(perf_mi_noop),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}

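/*
 * Mock selftest: verify that each entry in intel_engines[] lists its
 * mmio_bases[] in descending gen order and that every populated entry
 * has a non-zero mmio base.
 */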
static int intel_mmio_bases_check(void *arg)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		const struct engine_info *info = &intel_engines[i];
		u8 prev = U8_MAX;

		for (j = 0; j < MAX_MMIO_BASES; j++) {
			u8 gen = info->mmio_bases[j].gen;
			u32 base = info->mmio_bases[j].base;

			if (gen >= prev) {
				pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       prev, gen);
				return -EINVAL;
			}

			if (gen == 0)
				break;

			if (!base) {
				pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
				       __func__,
				       intel_engine_class_repr(info->class),
				       info->class, info->instance,
				       base, gen, j);
				return -EINVAL;
			}

			prev = gen;
		}

		pr_debug("%s: min gen supported for %s%d is %d\n",
			 __func__,
			 intel_engine_class_repr(info->class),
			 info->instance,
			 prev);
	}

	return 0;
}

int intel_engine_cs_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(intel_mmio_bases_check),
	};

	return i915_subtests(tests, NULL);
}