/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
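
/*
 * igt_spinner submits a batch that spins forever on the GPU, reporting
 * its progress via a per-context seqno written to a heartbeat (HWS)
 * page. Selftests use it to keep an engine busy while exercising
 * preemption, hangcheck and reset handling.
 */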
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	return 0;

err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
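
/*
 * Map @obj into @ce's address space and pin it for GPU use, taking the
 * ww lock only transiently if the caller did not supply one.
 */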
static void *igt_spinner_pin_obj(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww,
				 struct drm_i915_gem_object *obj,
				 unsigned int mode, struct i915_vma **vma)
{
	void *vaddr;
	int ret;

	*vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(*vma))
		return ERR_CAST(*vma);

	ret = i915_gem_object_lock(obj, ww);
	if (ret)
		return ERR_PTR(ret);

	vaddr = i915_gem_object_pin_map(obj, mode);

	if (!ww)
		i915_gem_object_unlock(obj);

	if (IS_ERR(vaddr))
		return vaddr;

	if (ww)
		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
	else
		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);

	if (ret) {
		i915_gem_object_unpin_map(obj);
		return ERR_PTR(ret);
	}

	return vaddr;
}
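
/*
 * Pin the HWS seqno page and the spinner batch into @ce's address space,
 * optionally under an existing ww acquire context.
 */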
int igt_spinner_pin(struct igt_spinner *spin,
		    struct intel_context *ce,
		    struct i915_gem_ww_ctx *ww)
{
	void *vaddr;

	if (spin->ce && WARN_ON(spin->ce != ce))
		return -ENODEV;
	spin->ce = ce;

	if (!spin->seqno) {
		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
	}

	if (!spin->batch) {
		unsigned int mode;

		mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->batch = vaddr;
	}

	return 0;
}
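
/*
 * Each fence context gets its own u32 slot in the HWS page, so
 * concurrent spinners on different contexts do not trample each other's
 * breadcrumbs.
 */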
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
}
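
/*
 * Build and submit a request whose batch stores the request's seqno into
 * the HWS page, emits @arbitration_command and then branches back to its
 * own start: an infinite loop that only igt_spinner_end() (or a reset)
 * can break.
 */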
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	if (!spin->batch) {
		err = igt_spinner_pin(spin, ce, NULL);
		if (err)
			return ERR_PTR(err);
	}

	hws = spin->hws_vma;
	vma = spin->batch_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Store rq->fence.seqno into the per-context slot of the HWS page */
	if (GRAPHICS_VER(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (GRAPHICS_VER(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (GRAPHICS_VER(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* Branch back to the start of the batch: spin until terminated */
	if (GRAPHICS_VER(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (GRAPHICS_VER(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(i915_vma_offset(vma));
	*batch++ = upper_32_bits(i915_vma_offset(vma));

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (GRAPHICS_VER(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
	return err ? ERR_PTR(err) : rq;
}
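
/* Read back the spinner's breadcrumb for @rq from the HWS page. */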
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
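
/*
 * Terminate the spinner by overwriting the first instruction of its loop
 * with MI_BATCH_BUFFER_END, then flush so the GPU picks up the write.
 */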
void igt_spinner_end(struct igt_spinner *spin)
{
	if (!spin->batch)
		return;

	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}
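
/* Stop the spinner and release its mappings, pins and objects. */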
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	if (spin->batch) {
		i915_vma_unpin(spin->batch_vma);
		i915_gem_object_unpin_map(spin->obj);
	}
	i915_gem_object_put(spin->obj);

	if (spin->seqno) {
		i915_vma_unpin(spin->hws_vma);
		i915_gem_object_unpin_map(spin->hws);
	}
	i915_gem_object_put(spin->hws);
}
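
/*
 * Wait for the spinner to start running: first a short busy-wait (100us),
 * then a sleeping wait of up to 50ms. Returns false if the spinner never
 * reported its seqno from the GPU.
 */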
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (i915_request_is_ready(rq))
		intel_engine_flush_submission(rq->engine);

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}