[linux.git] blame: drivers/gpu/drm/i915/intel_ringbuffer.c (at commit "drm/i915/skl: drop workarounds for C0 revision")
1/*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <[email protected]>
25 * Zou Nan hai <[email protected]>
26 * Xiang Hai hao <[email protected]>
27 *
28 */
29
a4d8a0fe 30#include <linux/log2.h>
760285e7 31#include <drm/drmP.h>
62fdfeaf 32#include "i915_drv.h"
760285e7 33#include <drm/i915_drm.h>
62fdfeaf 34#include "i915_trace.h"
881f47b6 35#include "intel_drv.h"
62fdfeaf 36
a0442461
CW
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
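/*
 * Annotation (not part of the original file): the ring is a circular buffer,
 * so the free space between the consumer (head) and the producer (tail)
 * wraps around at 'size'. Worked example: with size = 0x2000, head = 0x0100
 * and tail = 0x1f00, head - tail = -0x1e00, which wraps to 0x0200 bytes of
 * raw space; I915_RING_FREE_SPACE is then subtracted, presumably so that a
 * completely full ring is never confused with an empty one (head == tail).
 */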
82e104cc 42int __intel_ring_space(int head, int tail, int size)
c7dca47b 43{
4f54741e
DG
44 int space = head - tail;
45 if (space <= 0)
1cf0ba14 46 space += size;
4f54741e 47 return space - I915_RING_FREE_SPACE;
c7dca47b
CW
48}
49
32c04f16 50void intel_ring_update_space(struct intel_ring *ring)
ebd0fd4b 51{
32c04f16
CW
52 if (ring->last_retired_head != -1) {
53 ring->head = ring->last_retired_head;
54 ring->last_retired_head = -1;
ebd0fd4b
DG
55 }
56
32c04f16
CW
57 ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
58 ring->tail, ring->size);
ebd0fd4b
DG
59}
60
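/*
 * Annotation: every command emitter in this file follows the same three-step
 * pattern: intel_ring_begin() reserves space for a fixed number of dwords
 * (returning an error if it cannot), intel_ring_emit() writes the dwords one
 * at a time, and intel_ring_advance() marks the end of the emission. The
 * hardware tail register itself is only written at submission time (see
 * i9xx_submit_request() further down); the per-generation flush routines
 * below only differ in which commands and flags they emit.
 */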
b72f3acb 61static int
7c9cf4e3 62gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
46f0f8d1 63{
7e37f889 64 struct intel_ring *ring = req->ring;
46f0f8d1
CW
65 u32 cmd;
66 int ret;
67
68 cmd = MI_FLUSH;
46f0f8d1 69
7c9cf4e3 70 if (mode & EMIT_INVALIDATE)
46f0f8d1
CW
71 cmd |= MI_READ_FLUSH;
72
5fb9de1a 73 ret = intel_ring_begin(req, 2);
46f0f8d1
CW
74 if (ret)
75 return ret;
76
b5321f30
CW
77 intel_ring_emit(ring, cmd);
78 intel_ring_emit(ring, MI_NOOP);
79 intel_ring_advance(ring);
46f0f8d1
CW
80
81 return 0;
82}
83
84static int
7c9cf4e3 85gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
62fdfeaf 86{
7e37f889 87 struct intel_ring *ring = req->ring;
6f392d54 88 u32 cmd;
b72f3acb 89 int ret;
6f392d54 90
36d527de
CW
91 /*
92 * read/write caches:
93 *
94 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
95 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
96 * also flushed at 2d versus 3d pipeline switches.
97 *
98 * read-only caches:
99 *
100 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
101 * MI_READ_FLUSH is set, and is always flushed on 965.
102 *
103 * I915_GEM_DOMAIN_COMMAND may not exist?
104 *
105 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
106 * invalidated when MI_EXE_FLUSH is set.
107 *
108 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
109 * invalidated with every MI_FLUSH.
110 *
111 * TLBs:
112 *
113 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
 114 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
115 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
116 * are flushed at any MI_FLUSH.
117 */
118
b5321f30 119 cmd = MI_FLUSH;
7c9cf4e3 120 if (mode & EMIT_INVALIDATE) {
36d527de 121 cmd |= MI_EXE_FLUSH;
b5321f30
CW
122 if (IS_G4X(req->i915) || IS_GEN5(req->i915))
123 cmd |= MI_INVALIDATE_ISP;
124 }
70eac33e 125
5fb9de1a 126 ret = intel_ring_begin(req, 2);
36d527de
CW
127 if (ret)
128 return ret;
b72f3acb 129
b5321f30
CW
130 intel_ring_emit(ring, cmd);
131 intel_ring_emit(ring, MI_NOOP);
132 intel_ring_advance(ring);
b72f3acb
CW
133
134 return 0;
8187a2b7
ZN
135}
136
8d315287
JB
137/**
138 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
139 * implementing two workarounds on gen6. From section 1.4.7.1
140 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
141 *
142 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
143 * produced by non-pipelined state commands), software needs to first
144 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
145 * 0.
146 *
147 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
148 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
149 *
150 * And the workaround for these two requires this workaround first:
151 *
152 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
153 * BEFORE the pipe-control with a post-sync op and no write-cache
154 * flushes.
155 *
156 * And this last workaround is tricky because of the requirements on
157 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
158 * volume 2 part 1:
159 *
160 * "1 of the following must also be set:
161 * - Render Target Cache Flush Enable ([12] of DW1)
162 * - Depth Cache Flush Enable ([0] of DW1)
163 * - Stall at Pixel Scoreboard ([1] of DW1)
164 * - Depth Stall ([13] of DW1)
165 * - Post-Sync Operation ([13] of DW1)
166 * - Notify Enable ([8] of DW1)"
167 *
168 * The cache flushes require the workaround flush that triggered this
169 * one, so we can't use it. Depth stall would trigger the same.
170 * Post-sync nonzero is what triggered this second workaround, so we
171 * can't use that one either. Notify enable is IRQs, which aren't
172 * really our business. That leaves only stall at scoreboard.
173 */
174static int
f2cf1fcc 175intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
8d315287 176{
7e37f889 177 struct intel_ring *ring = req->ring;
b5321f30 178 u32 scratch_addr =
bde13ebd 179 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
8d315287
JB
180 int ret;
181
5fb9de1a 182 ret = intel_ring_begin(req, 6);
8d315287
JB
183 if (ret)
184 return ret;
185
b5321f30
CW
186 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
187 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
8d315287 188 PIPE_CONTROL_STALL_AT_SCOREBOARD);
b5321f30
CW
189 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
190 intel_ring_emit(ring, 0); /* low dword */
191 intel_ring_emit(ring, 0); /* high dword */
192 intel_ring_emit(ring, MI_NOOP);
193 intel_ring_advance(ring);
8d315287 194
5fb9de1a 195 ret = intel_ring_begin(req, 6);
8d315287
JB
196 if (ret)
197 return ret;
198
b5321f30
CW
199 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
200 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
201 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
202 intel_ring_emit(ring, 0);
203 intel_ring_emit(ring, 0);
204 intel_ring_emit(ring, MI_NOOP);
205 intel_ring_advance(ring);
8d315287
JB
206
207 return 0;
208}
209
210static int
7c9cf4e3 211gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
8d315287 212{
7e37f889 213 struct intel_ring *ring = req->ring;
b5321f30 214 u32 scratch_addr =
bde13ebd 215 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
8d315287 216 u32 flags = 0;
8d315287
JB
217 int ret;
218
b3111509 219 /* Force SNB workarounds for PIPE_CONTROL flushes */
f2cf1fcc 220 ret = intel_emit_post_sync_nonzero_flush(req);
b3111509
PZ
221 if (ret)
222 return ret;
223
8d315287
JB
224 /* Just flush everything. Experiments have shown that reducing the
225 * number of bits based on the write domains has little performance
226 * impact.
227 */
7c9cf4e3 228 if (mode & EMIT_FLUSH) {
7d54a904
CW
229 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
230 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
231 /*
232 * Ensure that any following seqno writes only happen
233 * when the render cache is indeed flushed.
234 */
97f209bc 235 flags |= PIPE_CONTROL_CS_STALL;
7d54a904 236 }
7c9cf4e3 237 if (mode & EMIT_INVALIDATE) {
7d54a904
CW
238 flags |= PIPE_CONTROL_TLB_INVALIDATE;
239 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
240 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
241 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
242 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
243 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
244 /*
245 * TLB invalidate requires a post-sync write.
246 */
3ac78313 247 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
7d54a904 248 }
8d315287 249
5fb9de1a 250 ret = intel_ring_begin(req, 4);
8d315287
JB
251 if (ret)
252 return ret;
253
b5321f30
CW
254 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
255 intel_ring_emit(ring, flags);
256 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
257 intel_ring_emit(ring, 0);
258 intel_ring_advance(ring);
8d315287
JB
259
260 return 0;
261}
262
f3987631 263static int
f2cf1fcc 264gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
f3987631 265{
7e37f889 266 struct intel_ring *ring = req->ring;
f3987631
PZ
267 int ret;
268
5fb9de1a 269 ret = intel_ring_begin(req, 4);
f3987631
PZ
270 if (ret)
271 return ret;
272
b5321f30
CW
273 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
274 intel_ring_emit(ring,
275 PIPE_CONTROL_CS_STALL |
276 PIPE_CONTROL_STALL_AT_SCOREBOARD);
277 intel_ring_emit(ring, 0);
278 intel_ring_emit(ring, 0);
279 intel_ring_advance(ring);
f3987631
PZ
280
281 return 0;
282}
283
4772eaeb 284static int
7c9cf4e3 285gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
4772eaeb 286{
7e37f889 287 struct intel_ring *ring = req->ring;
b5321f30 288 u32 scratch_addr =
bde13ebd 289 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
4772eaeb 290 u32 flags = 0;
4772eaeb
PZ
291 int ret;
292
f3987631
PZ
293 /*
294 * Ensure that any following seqno writes only happen when the render
295 * cache is indeed flushed.
296 *
297 * Workaround: 4th PIPE_CONTROL command (except the ones with only
298 * read-cache invalidate bits set) must have the CS_STALL bit set. We
299 * don't try to be clever and just set it unconditionally.
300 */
301 flags |= PIPE_CONTROL_CS_STALL;
302
4772eaeb
PZ
303 /* Just flush everything. Experiments have shown that reducing the
304 * number of bits based on the write domains has little performance
305 * impact.
306 */
7c9cf4e3 307 if (mode & EMIT_FLUSH) {
4772eaeb
PZ
308 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
309 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 310 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 311 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4772eaeb 312 }
7c9cf4e3 313 if (mode & EMIT_INVALIDATE) {
4772eaeb
PZ
314 flags |= PIPE_CONTROL_TLB_INVALIDATE;
315 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
316 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
317 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
318 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
319 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
148b83d0 320 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
4772eaeb
PZ
321 /*
322 * TLB invalidate requires a post-sync write.
323 */
324 flags |= PIPE_CONTROL_QW_WRITE;
b9e1faa7 325 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
f3987631 326
add284a3
CW
327 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
328
f3987631
PZ
329 /* Workaround: we must issue a pipe_control with CS-stall bit
330 * set before a pipe_control command that has the state cache
331 * invalidate bit set. */
f2cf1fcc 332 gen7_render_ring_cs_stall_wa(req);
4772eaeb
PZ
333 }
334
5fb9de1a 335 ret = intel_ring_begin(req, 4);
4772eaeb
PZ
336 if (ret)
337 return ret;
338
b5321f30
CW
339 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
340 intel_ring_emit(ring, flags);
341 intel_ring_emit(ring, scratch_addr);
342 intel_ring_emit(ring, 0);
343 intel_ring_advance(ring);
4772eaeb
PZ
344
345 return 0;
346}
347
884ceace 348static int
f2cf1fcc 349gen8_emit_pipe_control(struct drm_i915_gem_request *req,
884ceace
KG
350 u32 flags, u32 scratch_addr)
351{
7e37f889 352 struct intel_ring *ring = req->ring;
884ceace
KG
353 int ret;
354
5fb9de1a 355 ret = intel_ring_begin(req, 6);
884ceace
KG
356 if (ret)
357 return ret;
358
b5321f30
CW
359 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
360 intel_ring_emit(ring, flags);
361 intel_ring_emit(ring, scratch_addr);
362 intel_ring_emit(ring, 0);
363 intel_ring_emit(ring, 0);
364 intel_ring_emit(ring, 0);
365 intel_ring_advance(ring);
884ceace
KG
366
367 return 0;
368}
369
a5f3d68e 370static int
7c9cf4e3 371gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
a5f3d68e 372{
56c0f1a7 373 u32 scratch_addr =
bde13ebd 374 i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
b5321f30 375 u32 flags = 0;
02c9f7e3 376 int ret;
a5f3d68e
BW
377
378 flags |= PIPE_CONTROL_CS_STALL;
379
7c9cf4e3 380 if (mode & EMIT_FLUSH) {
a5f3d68e
BW
381 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
382 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 383 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 384 flags |= PIPE_CONTROL_FLUSH_ENABLE;
a5f3d68e 385 }
7c9cf4e3 386 if (mode & EMIT_INVALIDATE) {
a5f3d68e
BW
387 flags |= PIPE_CONTROL_TLB_INVALIDATE;
388 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
389 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
390 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
391 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
392 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
393 flags |= PIPE_CONTROL_QW_WRITE;
394 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
02c9f7e3
KG
395
396 /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
f2cf1fcc 397 ret = gen8_emit_pipe_control(req,
02c9f7e3
KG
398 PIPE_CONTROL_CS_STALL |
399 PIPE_CONTROL_STALL_AT_SCOREBOARD,
400 0);
401 if (ret)
402 return ret;
a5f3d68e
BW
403 }
404
f2cf1fcc 405 return gen8_emit_pipe_control(req, flags, scratch_addr);
a5f3d68e
BW
406}
407
7e37f889 408u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
8187a2b7 409{
c033666a 410 struct drm_i915_private *dev_priv = engine->i915;
50877445 411 u64 acthd;
8187a2b7 412
c033666a 413 if (INTEL_GEN(dev_priv) >= 8)
0bc40be8
TU
414 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
415 RING_ACTHD_UDW(engine->mmio_base));
c033666a 416 else if (INTEL_GEN(dev_priv) >= 4)
0bc40be8 417 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
50877445
CW
418 else
419 acthd = I915_READ(ACTHD);
420
421 return acthd;
8187a2b7
ZN
422}
423
0bc40be8 424static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
035dc1e0 425{
c033666a 426 struct drm_i915_private *dev_priv = engine->i915;
035dc1e0
SV
427 u32 addr;
428
429 addr = dev_priv->status_page_dmah->busaddr;
c033666a 430 if (INTEL_GEN(dev_priv) >= 4)
035dc1e0
SV
431 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
432 I915_WRITE(HWS_PGA, addr);
433}
434
0bc40be8 435static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
af75f269 436{
c033666a 437 struct drm_i915_private *dev_priv = engine->i915;
f0f59a00 438 i915_reg_t mmio;
af75f269
DL
439
440 /* The ring status page addresses are no longer next to the rest of
441 * the ring registers as of gen7.
442 */
c033666a 443 if (IS_GEN7(dev_priv)) {
0bc40be8 444 switch (engine->id) {
af75f269
DL
445 case RCS:
446 mmio = RENDER_HWS_PGA_GEN7;
447 break;
448 case BCS:
449 mmio = BLT_HWS_PGA_GEN7;
450 break;
451 /*
 452 * VCS2 actually doesn't exist on Gen7; it is listed here only to
 453 * silence the gcc switch check warning.
454 */
455 case VCS2:
456 case VCS:
457 mmio = BSD_HWS_PGA_GEN7;
458 break;
459 case VECS:
460 mmio = VEBOX_HWS_PGA_GEN7;
461 break;
462 }
c033666a 463 } else if (IS_GEN6(dev_priv)) {
0bc40be8 464 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
af75f269
DL
465 } else {
466 /* XXX: gen8 returns to sanity */
0bc40be8 467 mmio = RING_HWS_PGA(engine->mmio_base);
af75f269
DL
468 }
469
57e88531 470 I915_WRITE(mmio, engine->status_page.ggtt_offset);
af75f269
DL
471 POSTING_READ(mmio);
472
473 /*
474 * Flush the TLB for this page
475 *
476 * FIXME: These two bits have disappeared on gen8, so a question
477 * arises: do we still need this and if so how should we go about
478 * invalidating the TLB?
479 */
ac657f64 480 if (IS_GEN(dev_priv, 6, 7)) {
0bc40be8 481 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
af75f269
DL
482
 483 /* ring should be idle before issuing a sync flush */
0bc40be8 484 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
af75f269
DL
485
486 I915_WRITE(reg,
487 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
488 INSTPM_SYNC_FLUSH));
25ab57f4
CW
489 if (intel_wait_for_register(dev_priv,
490 reg, INSTPM_SYNC_FLUSH, 0,
491 1000))
af75f269 492 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
0bc40be8 493 engine->name);
af75f269
DL
494 }
495}
496
0bc40be8 497static bool stop_ring(struct intel_engine_cs *engine)
8187a2b7 498{
c033666a 499 struct drm_i915_private *dev_priv = engine->i915;
8187a2b7 500
21a2c58a 501 if (INTEL_GEN(dev_priv) > 2) {
0bc40be8 502 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
3d808eb1
CW
503 if (intel_wait_for_register(dev_priv,
504 RING_MI_MODE(engine->mmio_base),
505 MODE_IDLE,
506 MODE_IDLE,
507 1000)) {
0bc40be8
TU
508 DRM_ERROR("%s : timed out trying to stop ring\n",
509 engine->name);
9bec9b13
CW
510 /* Sometimes we observe that the idle flag is not
511 * set even though the ring is empty. So double
512 * check before giving up.
513 */
0bc40be8 514 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
9bec9b13 515 return false;
9991ae78
CW
516 }
517 }
b7884eb4 518
0bc40be8
TU
519 I915_WRITE_CTL(engine, 0);
520 I915_WRITE_HEAD(engine, 0);
c5efa1ad 521 I915_WRITE_TAIL(engine, 0);
8187a2b7 522
21a2c58a 523 if (INTEL_GEN(dev_priv) > 2) {
0bc40be8
TU
524 (void)I915_READ_CTL(engine);
525 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
9991ae78 526 }
a51435a3 527
0bc40be8 528 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
9991ae78 529}
8187a2b7 530
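/*
 * Annotation: init_ring_common() below brings a ring up from scratch: stop
 * the ring (retrying once, since e.g. G45 often fails to reset HEAD on the
 * first attempt), set up the status page, program START, HEAD, TAIL and CTL,
 * and finally poll RING_CTL for RING_VALID before declaring the ring alive.
 */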
0bc40be8 531static int init_ring_common(struct intel_engine_cs *engine)
9991ae78 532{
c033666a 533 struct drm_i915_private *dev_priv = engine->i915;
7e37f889 534 struct intel_ring *ring = engine->buffer;
9991ae78
CW
535 int ret = 0;
536
59bad947 537 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9991ae78 538
0bc40be8 539 if (!stop_ring(engine)) {
9991ae78 540 /* G45 ring initialization often fails to reset head to zero */
6fd0d56e
CW
541 DRM_DEBUG_KMS("%s head not reset to zero "
542 "ctl %08x head %08x tail %08x start %08x\n",
0bc40be8
TU
543 engine->name,
544 I915_READ_CTL(engine),
545 I915_READ_HEAD(engine),
546 I915_READ_TAIL(engine),
547 I915_READ_START(engine));
8187a2b7 548
0bc40be8 549 if (!stop_ring(engine)) {
6fd0d56e
CW
550 DRM_ERROR("failed to set %s head to zero "
551 "ctl %08x head %08x tail %08x start %08x\n",
0bc40be8
TU
552 engine->name,
553 I915_READ_CTL(engine),
554 I915_READ_HEAD(engine),
555 I915_READ_TAIL(engine),
556 I915_READ_START(engine));
9991ae78
CW
557 ret = -EIO;
558 goto out;
6fd0d56e 559 }
8187a2b7
ZN
560 }
561
3177659a 562 if (HWS_NEEDS_PHYSICAL(dev_priv))
0bc40be8 563 ring_setup_phys_status_page(engine);
3177659a
CS
564 else
565 intel_ring_setup_status_page(engine);
9991ae78 566
821ed7df
CW
567 intel_engine_reset_irq(engine);
568
ece4a17d 569 /* Enforce ordering by reading HEAD register back */
0bc40be8 570 I915_READ_HEAD(engine);
ece4a17d 571
0d8957c8
SV
572 /* Initialize the ring. This must happen _after_ we've cleared the ring
573 * registers with the above sequence (the readback of the HEAD registers
574 * also enforces ordering), otherwise the hw might lose the new ring
575 * register values. */
bde13ebd 576 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
95468892
CW
577
578 /* WaClearRingBufHeadRegAtInit:ctg,elk */
0bc40be8 579 if (I915_READ_HEAD(engine))
95468892 580 DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
0bc40be8 581 engine->name, I915_READ_HEAD(engine));
821ed7df
CW
582
583 intel_ring_update_space(ring);
584 I915_WRITE_HEAD(engine, ring->head);
585 I915_WRITE_TAIL(engine, ring->tail);
586 (void)I915_READ_TAIL(engine);
95468892 587
0bc40be8 588 I915_WRITE_CTL(engine,
7e37f889 589 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
5d031e5b 590 | RING_VALID);
8187a2b7 591
8187a2b7 592 /* If the head is still not zero, the ring is dead */
821ed7df
CW
593 if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
594 RING_VALID, RING_VALID,
595 50)) {
e74cfed5 596 DRM_ERROR("%s initialization failed "
821ed7df 597 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
0bc40be8
TU
598 engine->name,
599 I915_READ_CTL(engine),
600 I915_READ_CTL(engine) & RING_VALID,
821ed7df
CW
601 I915_READ_HEAD(engine), ring->head,
602 I915_READ_TAIL(engine), ring->tail,
0bc40be8 603 I915_READ_START(engine),
bde13ebd 604 i915_ggtt_offset(ring->vma));
b7884eb4
SV
605 ret = -EIO;
606 goto out;
8187a2b7
ZN
607 }
608
fc0768ce 609 intel_engine_init_hangcheck(engine);
50f018df 610
b7884eb4 611out:
59bad947 612 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
b7884eb4
SV
613
614 return ret;
8187a2b7
ZN
615}
616
821ed7df
CW
617static void reset_ring_common(struct intel_engine_cs *engine,
618 struct drm_i915_gem_request *request)
619{
620 struct intel_ring *ring = request->ring;
621
622 ring->head = request->postfix;
623 ring->last_retired_head = -1;
624}
625
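/*
 * Annotation: the workaround (WA) machinery in this file is split in two.
 * The *_init_workarounds() functions mostly just record register/mask/value
 * tuples in dev_priv->workarounds via wa_add() (a few entries are written
 * immediately with I915_WRITE instead). intel_ring_workarounds_emit() then
 * replays the recorded table into the ring as one MI_LOAD_REGISTER_IMM
 * block, bracketed by EMIT_BARRIER flushes, when a render context is
 * initialised (see intel_rcs_ctx_init()).
 */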
e2be4faf 626static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
86d7f238 627{
7e37f889 628 struct intel_ring *ring = req->ring;
c033666a
CW
629 struct i915_workarounds *w = &req->i915->workarounds;
630 int ret, i;
888b5995 631
02235808 632 if (w->count == 0)
7225342a 633 return 0;
888b5995 634
7c9cf4e3 635 ret = req->engine->emit_flush(req, EMIT_BARRIER);
7225342a
MK
636 if (ret)
637 return ret;
888b5995 638
5fb9de1a 639 ret = intel_ring_begin(req, (w->count * 2 + 2));
7225342a
MK
640 if (ret)
641 return ret;
642
b5321f30 643 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
7225342a 644 for (i = 0; i < w->count; i++) {
b5321f30
CW
645 intel_ring_emit_reg(ring, w->reg[i].addr);
646 intel_ring_emit(ring, w->reg[i].value);
7225342a 647 }
b5321f30 648 intel_ring_emit(ring, MI_NOOP);
7225342a 649
b5321f30 650 intel_ring_advance(ring);
7225342a 651
7c9cf4e3 652 ret = req->engine->emit_flush(req, EMIT_BARRIER);
7225342a
MK
653 if (ret)
654 return ret;
888b5995 655
7225342a 656 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
888b5995 657
7225342a 658 return 0;
86d7f238
AS
659}
660
8753181e 661static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
8f0e2b9d
SV
662{
663 int ret;
664
e2be4faf 665 ret = intel_ring_workarounds_emit(req);
8f0e2b9d
SV
666 if (ret != 0)
667 return ret;
668
be01363f 669 ret = i915_gem_render_state_init(req);
8f0e2b9d 670 if (ret)
e26e1b97 671 return ret;
8f0e2b9d 672
e26e1b97 673 return 0;
8f0e2b9d
SV
674}
675
7225342a 676static int wa_add(struct drm_i915_private *dev_priv,
f0f59a00
VS
677 i915_reg_t addr,
678 const u32 mask, const u32 val)
7225342a
MK
679{
680 const u32 idx = dev_priv->workarounds.count;
681
682 if (WARN_ON(idx >= I915_MAX_WA_REGS))
683 return -ENOSPC;
684
685 dev_priv->workarounds.reg[idx].addr = addr;
686 dev_priv->workarounds.reg[idx].value = val;
687 dev_priv->workarounds.reg[idx].mask = mask;
688
689 dev_priv->workarounds.count++;
690
691 return 0;
86d7f238
AS
692}
693
ca5a0fbd 694#define WA_REG(addr, mask, val) do { \
cf4b0de6 695 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
7225342a
MK
696 if (r) \
697 return r; \
ca5a0fbd 698 } while (0)
7225342a
MK
699
700#define WA_SET_BIT_MASKED(addr, mask) \
26459343 701 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
7225342a
MK
702
703#define WA_CLR_BIT_MASKED(addr, mask) \
26459343 704 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
7225342a 705
98533251 706#define WA_SET_FIELD_MASKED(addr, mask, value) \
cf4b0de6 707 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
7225342a 708
cf4b0de6
DL
709#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
710#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
7225342a 711
cf4b0de6 712#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
7225342a 713
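/*
 * Annotation, illustrative expansion of the helpers above:
 * WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT) expands to
 * wa_add(dev_priv, HDC_CHICKEN0, HDC_FORCE_NON_COHERENT,
 *        _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)),
 * i.e. it queues a masked-bit register write for later emission rather than
 * touching the register directly (WA_SET_BIT/WA_CLR_BIT, by contrast, fold
 * the current I915_READ() value into the queued write).
 */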
0bc40be8
TU
714static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
715 i915_reg_t reg)
33136b06 716{
c033666a 717 struct drm_i915_private *dev_priv = engine->i915;
33136b06 718 struct i915_workarounds *wa = &dev_priv->workarounds;
0bc40be8 719 const uint32_t index = wa->hw_whitelist_count[engine->id];
33136b06
AS
720
721 if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
722 return -EINVAL;
723
0bc40be8 724 WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
33136b06 725 i915_mmio_reg_offset(reg));
0bc40be8 726 wa->hw_whitelist_count[engine->id]++;
33136b06
AS
727
728 return 0;
729}
730
0bc40be8 731static int gen8_init_workarounds(struct intel_engine_cs *engine)
e9a64ada 732{
c033666a 733 struct drm_i915_private *dev_priv = engine->i915;
68c6198b
AS
734
735 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
e9a64ada 736
717d84d6
AS
737 /* WaDisableAsyncFlipPerfMode:bdw,chv */
738 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
739
d0581194
AS
740 /* WaDisablePartialInstShootdown:bdw,chv */
741 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
742 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
743
a340af58
AS
744 /* Use Force Non-Coherent whenever executing a 3D context. This is a
745 * workaround for for a possible hang in the unlikely event a TLB
746 * invalidation occurs during a PSD flush.
747 */
748 /* WaForceEnableNonCoherent:bdw,chv */
120f5d28 749 /* WaHdcDisableFetchWhenMasked:bdw,chv */
a340af58 750 WA_SET_BIT_MASKED(HDC_CHICKEN0,
120f5d28 751 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
a340af58
AS
752 HDC_FORCE_NON_COHERENT);
753
6def8fdd
AS
754 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
755 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
756 * polygons in the same 8x4 pixel/sample area to be processed without
757 * stalling waiting for the earlier ones to write to Hierarchical Z
758 * buffer."
759 *
760 * This optimization is off by default for BDW and CHV; turn it on.
761 */
762 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
763
48404636
AS
764 /* Wa4x4STCOptimizationDisable:bdw,chv */
765 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
766
7eebcde6
AS
767 /*
768 * BSpec recommends 8x4 when MSAA is used,
769 * however in practice 16x4 seems fastest.
770 *
771 * Note that PS/WM thread counts depend on the WIZ hashing
772 * disable bit, which we don't touch here, but it's good
773 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
774 */
775 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
776 GEN6_WIZ_HASHING_MASK,
777 GEN6_WIZ_HASHING_16x4);
778
e9a64ada
AS
779 return 0;
780}
781
0bc40be8 782static int bdw_init_workarounds(struct intel_engine_cs *engine)
86d7f238 783{
c033666a 784 struct drm_i915_private *dev_priv = engine->i915;
e9a64ada 785 int ret;
86d7f238 786
0bc40be8 787 ret = gen8_init_workarounds(engine);
e9a64ada
AS
788 if (ret)
789 return ret;
790
101b376d 791 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
d0581194 792 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
86d7f238 793
101b376d 794 /* WaDisableDopClockGating:bdw */
7225342a
MK
795 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
796 DOP_CLOCK_GATING_DISABLE);
86d7f238 797
7225342a
MK
798 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
799 GEN8_SAMPLER_POWER_BYPASS_DIS);
86d7f238 800
7225342a 801 WA_SET_BIT_MASKED(HDC_CHICKEN0,
35cb6f3b
DL
802 /* WaForceContextSaveRestoreNonCoherent:bdw */
803 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
35cb6f3b 804 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
c033666a 805 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
86d7f238 806
86d7f238
AS
807 return 0;
808}
809
0bc40be8 810static int chv_init_workarounds(struct intel_engine_cs *engine)
00e1e623 811{
c033666a 812 struct drm_i915_private *dev_priv = engine->i915;
e9a64ada 813 int ret;
00e1e623 814
0bc40be8 815 ret = gen8_init_workarounds(engine);
e9a64ada
AS
816 if (ret)
817 return ret;
818
00e1e623 819 /* WaDisableThreadStallDopClockGating:chv */
d0581194 820 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
00e1e623 821
d60de81d
KG
822 /* Improve HiZ throughput on CHV. */
823 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
824
7225342a
MK
825 return 0;
826}
827
0bc40be8 828static int gen9_init_workarounds(struct intel_engine_cs *engine)
3b106531 829{
c033666a 830 struct drm_i915_private *dev_priv = engine->i915;
e0f3fa09 831 int ret;
ab0dfafe 832
a8ab5ed5
TG
833 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
834 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
835
e5f81d65 836 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
9c4cbf82
MK
837 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
838 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
839
e5f81d65 840 /* WaDisableKillLogic:bxt,skl,kbl */
9c4cbf82
MK
841 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
842 ECOCHK_DIS_TLB);
843
e5f81d65
MK
844 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
845 /* WaDisablePartialInstShootdown:skl,bxt,kbl */
ab0dfafe 846 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
950b2aae 847 FLOW_CONTROL_ENABLE |
ab0dfafe
HN
848 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
849
e5f81d65 850 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
8424171e
NH
851 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
852 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
853
a117f378
JN
854 /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
855 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
a86eb582
DL
856 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
857 GEN9_DG_MIRROR_FIX_ENABLE);
1de4582f 858
a117f378
JN
859 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
860 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
183c6dac
DL
861 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
862 GEN9_RHWO_OPTIMIZATION_DISABLE);
9b01435d
AS
863 /*
864 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
865 * but we do that in per ctx batchbuffer as there is an issue
866 * with this register not getting restored on ctx restore
867 */
183c6dac
DL
868 }
869
e5f81d65
MK
870 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
871 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
bfd8ad4e
TG
872 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
873 GEN9_ENABLE_YV12_BUGFIX |
874 GEN9_ENABLE_GPGPU_PREEMPTION);
cac23df4 875
e5f81d65
MK
876 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
877 /* WaDisablePartialResolveInVc:skl,bxt,kbl */
60294683
AS
878 WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
879 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
9370cd98 880
e5f81d65 881 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
e2db7071
DL
882 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
883 GEN9_CCS_TLB_PREFETCH_ENABLE);
884
0d0b8dcf
JN
885 /* WaDisableMaskBasedCammingInRCC:bxt */
886 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
38a39a7b
BW
887 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
888 PIXEL_MASK_CAMMING_DISABLE);
889
5b0e3659
MK
890 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
891 WA_SET_BIT_MASKED(HDC_CHICKEN0,
892 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
893 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
8ea6f892 894
bbaefe72
MK
895 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
896 * both tied to WaForceContextSaveRestoreNonCoherent
897 * in some hsds for skl. We keep the tie for all gen9. The
898 * documentation is a bit hazy and so we want to get common behaviour,
899 * even though there is no clear evidence we would need both on kbl/bxt.
900 * This area has been source of system hangs so we play it safe
901 * and mimic the skl regardless of what bspec says.
902 *
903 * Use Force Non-Coherent whenever executing a 3D context. This
904 * is a workaround for a possible hang in the unlikely event
905 * a TLB invalidation occurs during a PSD flush.
906 */
907
908 /* WaForceEnableNonCoherent:skl,bxt,kbl */
909 WA_SET_BIT_MASKED(HDC_CHICKEN0,
910 HDC_FORCE_NON_COHERENT);
911
912 /* WaDisableHDCInvalidation:skl,bxt,kbl */
913 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
914 BDW_DISABLE_HDC_INVALIDATION);
915
e5f81d65
MK
916 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
917 if (IS_SKYLAKE(dev_priv) ||
918 IS_KABYLAKE(dev_priv) ||
919 IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
8c761609
AS
920 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
921 GEN8_SAMPLER_POWER_BYPASS_DIS);
8c761609 922
e5f81d65 923 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
6b6d5626
RB
924 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
925
e5f81d65 926 /* WaOCLCoherentLineFlush:skl,bxt,kbl */
6ecf56ae
AS
927 I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
928 GEN8_LQSC_FLUSH_COHERENT_LINES));
929
6bb62855 930 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
931 ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
932 if (ret)
933 return ret;
934
e5f81d65 935 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
0bc40be8 936 ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
e0f3fa09
AS
937 if (ret)
938 return ret;
939
e5f81d65 940 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
0bc40be8 941 ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
3669ab61
AS
942 if (ret)
943 return ret;
944
3b106531
HN
945 return 0;
946}
947
0bc40be8 948static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
b7668791 949{
c033666a 950 struct drm_i915_private *dev_priv = engine->i915;
b7668791
DL
951 u8 vals[3] = { 0, 0, 0 };
952 unsigned int i;
953
954 for (i = 0; i < 3; i++) {
955 u8 ss;
956
957 /*
958 * Only consider slices where one, and only one, subslice has 7
959 * EUs
960 */
43b67998 961 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
b7668791
DL
962 continue;
963
964 /*
965 * subslice_7eu[i] != 0 (because of the check above) and
966 * ss_max == 4 (maximum number of subslices possible per slice)
967 *
968 * -> 0 <= ss <= 3;
969 */
43b67998 970 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
b7668791
DL
971 vals[i] = 3 - ss;
972 }
973
974 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
975 return 0;
976
977 /* Tune IZ hashing. See intel_device_info_runtime_init() */
978 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
979 GEN9_IZ_HASHING_MASK(2) |
980 GEN9_IZ_HASHING_MASK(1) |
981 GEN9_IZ_HASHING_MASK(0),
982 GEN9_IZ_HASHING(2, vals[2]) |
983 GEN9_IZ_HASHING(1, vals[1]) |
984 GEN9_IZ_HASHING(0, vals[0]));
985
986 return 0;
987}
988
0bc40be8 989static int skl_init_workarounds(struct intel_engine_cs *engine)
8d205494 990{
c033666a 991 struct drm_i915_private *dev_priv = engine->i915;
aa0011a8 992 int ret;
d0bbbc4f 993
0bc40be8 994 ret = gen9_init_workarounds(engine);
aa0011a8
AS
995 if (ret)
996 return ret;
8d205494 997
a78536e7
AS
998 /*
999 * Actual WA is to disable percontext preemption granularity control
1000 * until D0 which is the default case so this is equivalent to
1001 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1002 */
c033666a 1003 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
a78536e7
AS
1004 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1005 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1006 }
1007
71dce58c 1008 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
9c4cbf82
MK
1009 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1010 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1011 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
1012 }
1013
1014 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1015 * involving this register should also be added to WA batch as required.
1016 */
c033666a 1017 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
9c4cbf82
MK
1018 /* WaDisableLSQCROPERFforOCL:skl */
1019 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1020 GEN8_LQSC_RO_PERF_DIS);
1021
1022 /* WaEnableGapsTsvCreditFix:skl */
a117f378
JN
1023 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1024 GEN9_GAPS_TSV_CREDIT_DISABLE));
d0bbbc4f 1025
e87a005d 1026 /* WaBarrierPerformanceFixDisable:skl */
c033666a 1027 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
5b6fd12a
VS
1028 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1029 HDC_FENCE_DEST_SLM_DISABLE |
1030 HDC_BARRIER_PERFORMANCE_DISABLE);
1031
9bd9dfb4 1032 /* WaDisableSbeCacheDispatchPortSharing:skl */
c033666a 1033 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
9bd9dfb4
MK
1034 WA_SET_BIT_MASKED(
1035 GEN7_HALF_SLICE_CHICKEN1,
1036 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
9bd9dfb4 1037
eee8efb0
MK
1038 /* WaDisableGafsUnitClkGating:skl */
1039 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1040
4ba9c1f7
MK
1041 /* WaInPlaceDecompressionHang:skl */
1042 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
1043 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1044 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1045
6107497e 1046 /* WaDisableLSQCROPERFforOCL:skl */
0bc40be8 1047 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
6107497e
AS
1048 if (ret)
1049 return ret;
1050
0bc40be8 1051 return skl_tune_iz_hashing(engine);
7225342a
MK
1052}
1053
0bc40be8 1054static int bxt_init_workarounds(struct intel_engine_cs *engine)
cae0437f 1055{
c033666a 1056 struct drm_i915_private *dev_priv = engine->i915;
aa0011a8 1057 int ret;
dfb601e6 1058
0bc40be8 1059 ret = gen9_init_workarounds(engine);
aa0011a8
AS
1060 if (ret)
1061 return ret;
cae0437f 1062
9c4cbf82
MK
1063 /* WaStoreMultiplePTEenable:bxt */
1064 /* This is a requirement according to Hardware specification */
c033666a 1065 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
9c4cbf82
MK
1066 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1067
1068 /* WaSetClckGatingDisableMedia:bxt */
c033666a 1069 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
9c4cbf82
MK
1070 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1071 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1072 }
1073
dfb601e6
NH
1074 /* WaDisableThreadStallDopClockGating:bxt */
1075 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1076 STALL_DOP_GATING_DISABLE);
1077
780f0aeb 1078 /* WaDisablePooledEuLoadBalancingFix:bxt */
1079 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1080 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1081 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1082 }
1083
983b4b9d 1084 /* WaDisableSbeCacheDispatchPortSharing:bxt */
c033666a 1085 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
983b4b9d
NH
1086 WA_SET_BIT_MASKED(
1087 GEN7_HALF_SLICE_CHICKEN1,
1088 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1089 }
1090
2c8580e4
AS
1091 /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
1092 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1093 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
a786d53a 1094 /* WaDisableLSQCROPERFforOCL:bxt */
c033666a 1095 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
0bc40be8 1096 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
2c8580e4
AS
1097 if (ret)
1098 return ret;
a786d53a 1099
0bc40be8 1100 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
a786d53a
AS
1101 if (ret)
1102 return ret;
2c8580e4
AS
1103 }
1104
050fc465 1105 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
c033666a 1106 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
36579cb6
ID
1107 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1108 L3_HIGH_PRIO_CREDITS(2));
050fc465 1109
575e3ccb
MA
1110 /* WaToEnableHwFixForPushConstHWBug:bxt */
1111 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
ad2bdb44
MK
1112 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1113 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1114
4ba9c1f7
MK
1115 /* WaInPlaceDecompressionHang:bxt */
1116 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1117 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1118 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1119
cae0437f
NH
1120 return 0;
1121}
1122
e5f81d65
MK
1123static int kbl_init_workarounds(struct intel_engine_cs *engine)
1124{
e587f6cb 1125 struct drm_i915_private *dev_priv = engine->i915;
e5f81d65
MK
1126 int ret;
1127
1128 ret = gen9_init_workarounds(engine);
1129 if (ret)
1130 return ret;
1131
e587f6cb
MK
1132 /* WaEnableGapsTsvCreditFix:kbl */
1133 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1134 GEN9_GAPS_TSV_CREDIT_DISABLE));
1135
c0b730d5
MK
1136 /* WaDisableDynamicCreditSharing:kbl */
1137 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
1138 WA_SET_BIT(GAMT_CHKN_BIT_REG,
1139 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1140
8401d42f
MK
1141 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
1142 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
1143 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1144 HDC_FENCE_DEST_SLM_DISABLE);
1145
fe905819
MK
1146 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1147 * involving this register should also be added to WA batch as required.
1148 */
1149 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
1150 /* WaDisableLSQCROPERFforOCL:kbl */
1151 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1152 GEN8_LQSC_RO_PERF_DIS);
1153
575e3ccb
MA
1154 /* WaToEnableHwFixForPushConstHWBug:kbl */
1155 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
ad2bdb44
MK
1156 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1157 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1158
4de5d7cc
MK
1159 /* WaDisableGafsUnitClkGating:kbl */
1160 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1161
954337aa
MK
1162 /* WaDisableSbeCacheDispatchPortSharing:kbl */
1163 WA_SET_BIT_MASKED(
1164 GEN7_HALF_SLICE_CHICKEN1,
1165 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1166
4ba9c1f7
MK
1167 /* WaInPlaceDecompressionHang:kbl */
1168 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1169 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1170
fe905819
MK
1171 /* WaDisableLSQCROPERFforOCL:kbl */
1172 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1173 if (ret)
1174 return ret;
1175
e5f81d65
MK
1176 return 0;
1177}
1178
0bc40be8 1179int init_workarounds_ring(struct intel_engine_cs *engine)
7225342a 1180{
c033666a 1181 struct drm_i915_private *dev_priv = engine->i915;
7225342a 1182
0bc40be8 1183 WARN_ON(engine->id != RCS);
7225342a
MK
1184
1185 dev_priv->workarounds.count = 0;
33136b06 1186 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
7225342a 1187
c033666a 1188 if (IS_BROADWELL(dev_priv))
0bc40be8 1189 return bdw_init_workarounds(engine);
7225342a 1190
c033666a 1191 if (IS_CHERRYVIEW(dev_priv))
0bc40be8 1192 return chv_init_workarounds(engine);
00e1e623 1193
c033666a 1194 if (IS_SKYLAKE(dev_priv))
0bc40be8 1195 return skl_init_workarounds(engine);
cae0437f 1196
c033666a 1197 if (IS_BROXTON(dev_priv))
0bc40be8 1198 return bxt_init_workarounds(engine);
3b106531 1199
e5f81d65
MK
1200 if (IS_KABYLAKE(dev_priv))
1201 return kbl_init_workarounds(engine);
1202
00e1e623
VS
1203 return 0;
1204}
1205
0bc40be8 1206static int init_render_ring(struct intel_engine_cs *engine)
8187a2b7 1207{
c033666a 1208 struct drm_i915_private *dev_priv = engine->i915;
0bc40be8 1209 int ret = init_ring_common(engine);
9c33baa6
KZ
1210 if (ret)
1211 return ret;
a69ffdbf 1212
61a563a2 1213 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
ac657f64 1214 if (IS_GEN(dev_priv, 4, 6))
6b26c86d 1215 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1c8c38c5
CW
1216
1217 /* We need to disable the AsyncFlip performance optimisations in order
1218 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1219 * programmed to '1' on all products.
8693a824 1220 *
2441f877 1221 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1c8c38c5 1222 */
ac657f64 1223 if (IS_GEN(dev_priv, 6, 7))
1c8c38c5
CW
1224 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1225
f05bb0c7 1226 /* Required for the hardware to program scanline values for waiting */
01fa0302 1227 /* WaEnableFlushTlbInvalidationMode:snb */
c033666a 1228 if (IS_GEN6(dev_priv))
f05bb0c7 1229 I915_WRITE(GFX_MODE,
aa83e30d 1230 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
f05bb0c7 1231
01fa0302 1232 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
c033666a 1233 if (IS_GEN7(dev_priv))
1c8c38c5 1234 I915_WRITE(GFX_MODE_GEN7,
01fa0302 1235 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1c8c38c5 1236 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
78501eac 1237
c033666a 1238 if (IS_GEN6(dev_priv)) {
3a69ddd6
KG
1239 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1240 * "If this bit is set, STCunit will have LRA as replacement
1241 * policy. [...] This bit must be reset. LRA replacement
1242 * policy is not supported."
1243 */
1244 I915_WRITE(CACHE_MODE_0,
5e13a0c5 1245 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
84f9f938
BW
1246 }
1247
ac657f64 1248 if (IS_GEN(dev_priv, 6, 7))
6b26c86d 1249 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
84f9f938 1250
035ea405
VS
1251 if (INTEL_INFO(dev_priv)->gen >= 6)
1252 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
15b9f80e 1253
0bc40be8 1254 return init_workarounds_ring(engine);
8187a2b7
ZN
1255}
1256
0bc40be8 1257static void render_ring_cleanup(struct intel_engine_cs *engine)
c6df541c 1258{
c033666a 1259 struct drm_i915_private *dev_priv = engine->i915;
3e78998a 1260
19880c4a 1261 i915_vma_unpin_and_release(&dev_priv->semaphore);
c6df541c
CW
1262}
1263
ad7bdb2b 1264static int gen8_rcs_signal(struct drm_i915_gem_request *req)
3e78998a 1265{
ad7bdb2b
CW
1266 struct intel_ring *ring = req->ring;
1267 struct drm_i915_private *dev_priv = req->i915;
3e78998a 1268 struct intel_engine_cs *waiter;
c3232b18
DG
1269 enum intel_engine_id id;
1270 int ret, num_rings;
3e78998a 1271
c1bb1145 1272 num_rings = INTEL_INFO(dev_priv)->num_rings;
ad7bdb2b 1273 ret = intel_ring_begin(req, (num_rings-1) * 8);
3e78998a
BW
1274 if (ret)
1275 return ret;
1276
c3232b18 1277 for_each_engine_id(waiter, dev_priv, id) {
ad7bdb2b 1278 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
3e78998a
BW
1279 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1280 continue;
1281
ad7bdb2b
CW
1282 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1283 intel_ring_emit(ring,
b5321f30
CW
1284 PIPE_CONTROL_GLOBAL_GTT_IVB |
1285 PIPE_CONTROL_QW_WRITE |
1286 PIPE_CONTROL_CS_STALL);
ad7bdb2b
CW
1287 intel_ring_emit(ring, lower_32_bits(gtt_offset));
1288 intel_ring_emit(ring, upper_32_bits(gtt_offset));
1289 intel_ring_emit(ring, req->fence.seqno);
1290 intel_ring_emit(ring, 0);
1291 intel_ring_emit(ring,
b5321f30
CW
1292 MI_SEMAPHORE_SIGNAL |
1293 MI_SEMAPHORE_TARGET(waiter->hw_id));
ad7bdb2b 1294 intel_ring_emit(ring, 0);
3e78998a 1295 }
ad7bdb2b 1296 intel_ring_advance(ring);
3e78998a
BW
1297
1298 return 0;
1299}
1300
ad7bdb2b 1301static int gen8_xcs_signal(struct drm_i915_gem_request *req)
3e78998a 1302{
ad7bdb2b
CW
1303 struct intel_ring *ring = req->ring;
1304 struct drm_i915_private *dev_priv = req->i915;
3e78998a 1305 struct intel_engine_cs *waiter;
c3232b18
DG
1306 enum intel_engine_id id;
1307 int ret, num_rings;
3e78998a 1308
c1bb1145 1309 num_rings = INTEL_INFO(dev_priv)->num_rings;
ad7bdb2b 1310 ret = intel_ring_begin(req, (num_rings-1) * 6);
3e78998a
BW
1311 if (ret)
1312 return ret;
1313
c3232b18 1314 for_each_engine_id(waiter, dev_priv, id) {
ad7bdb2b 1315 u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
3e78998a
BW
1316 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1317 continue;
1318
ad7bdb2b 1319 intel_ring_emit(ring,
b5321f30 1320 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
ad7bdb2b 1321 intel_ring_emit(ring,
b5321f30
CW
1322 lower_32_bits(gtt_offset) |
1323 MI_FLUSH_DW_USE_GTT);
ad7bdb2b
CW
1324 intel_ring_emit(ring, upper_32_bits(gtt_offset));
1325 intel_ring_emit(ring, req->fence.seqno);
1326 intel_ring_emit(ring,
b5321f30
CW
1327 MI_SEMAPHORE_SIGNAL |
1328 MI_SEMAPHORE_TARGET(waiter->hw_id));
ad7bdb2b 1329 intel_ring_emit(ring, 0);
3e78998a 1330 }
ad7bdb2b 1331 intel_ring_advance(ring);
3e78998a
BW
1332
1333 return 0;
1334}
1335
ad7bdb2b 1336static int gen6_signal(struct drm_i915_gem_request *req)
1ec14ad3 1337{
ad7bdb2b
CW
1338 struct intel_ring *ring = req->ring;
1339 struct drm_i915_private *dev_priv = req->i915;
318f89ca 1340 struct intel_engine_cs *engine;
c3232b18 1341 int ret, num_rings;
78325f2d 1342
c1bb1145 1343 num_rings = INTEL_INFO(dev_priv)->num_rings;
ad7bdb2b 1344 ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
024a43e1
BW
1345 if (ret)
1346 return ret;
024a43e1 1347
318f89ca
TU
1348 for_each_engine(engine, dev_priv) {
1349 i915_reg_t mbox_reg;
1350
1351 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
1352 continue;
f0f59a00 1353
318f89ca 1354 mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
f0f59a00 1355 if (i915_mmio_reg_valid(mbox_reg)) {
ad7bdb2b
CW
1356 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1357 intel_ring_emit_reg(ring, mbox_reg);
1358 intel_ring_emit(ring, req->fence.seqno);
78325f2d
BW
1359 }
1360 }
024a43e1 1361
a1444b79
BW
1362 /* If num_dwords was rounded, make sure the tail pointer is correct */
1363 if (num_rings % 2 == 0)
ad7bdb2b
CW
1364 intel_ring_emit(ring, MI_NOOP);
1365 intel_ring_advance(ring);
a1444b79 1366
024a43e1 1367 return 0;
1ec14ad3
CW
1368}
1369
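/*
 * Annotation: inter-engine synchronisation on gen6/7 is a mailbox scheme.
 * gen6_signal() above writes the request's seqno into every other engine's
 * signal mailbox register, and gen6_ring_sync_to() further down emits
 * MI_SEMAPHORE_MBOX so the waiting ring stalls until the mailbox value
 * reaches the target seqno. On gen8 the equivalent handshake goes through
 * addresses in the global GTT instead (gen8_rcs_signal()/gen8_xcs_signal()
 * and gen8_ring_sync_to()).
 */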
b0411e7d
CW
1370static void i9xx_submit_request(struct drm_i915_gem_request *request)
1371{
1372 struct drm_i915_private *dev_priv = request->i915;
1373
1374 I915_WRITE_TAIL(request->engine,
1375 intel_ring_offset(request->ring, request->tail));
1376}
1377
1378static int i9xx_emit_request(struct drm_i915_gem_request *req)
1ec14ad3 1379{
7e37f889 1380 struct intel_ring *ring = req->ring;
024a43e1 1381 int ret;
52ed2325 1382
9242f974 1383 ret = intel_ring_begin(req, 4);
1ec14ad3
CW
1384 if (ret)
1385 return ret;
1386
b5321f30
CW
1387 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
1388 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1389 intel_ring_emit(ring, req->fence.seqno);
1390 intel_ring_emit(ring, MI_USER_INTERRUPT);
c5efa1ad
CW
1391 intel_ring_advance(ring);
1392
1393 req->tail = ring->tail;
1ec14ad3 1394
1ec14ad3
CW
1395 return 0;
1396}
1397
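/*
 * Annotation: i9xx_emit_request() above is the baseline breadcrumb; it
 * stores the request's seqno into the hardware status page at
 * I915_GEM_HWS_INDEX and raises MI_USER_INTERRUPT to wake any waiters.
 * gen6_sema_emit_request() and gen8_render_emit_request() below wrap the
 * same idea with semaphore signalling and a PIPE_CONTROL post-sync write
 * respectively.
 */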
b0411e7d 1398/**
618e4ca7 1399 * gen6_sema_emit_request - Update the semaphore mailbox registers
b0411e7d
CW
1400 *
1401 * @request - request to write to the ring
1402 *
1403 * Update the mailbox registers in the *other* rings with the current seqno.
1404 * This acts like a signal in the canonical semaphore.
1405 */
618e4ca7 1406static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
b0411e7d 1407{
618e4ca7 1408 int ret;
b0411e7d 1409
618e4ca7
CW
1410 ret = req->engine->semaphore.signal(req);
1411 if (ret)
1412 return ret;
b0411e7d
CW
1413
1414 return i9xx_emit_request(req);
1415}
1416
ddd66c51 1417static int gen8_render_emit_request(struct drm_i915_gem_request *req)
a58c01aa
CW
1418{
1419 struct intel_engine_cs *engine = req->engine;
7e37f889 1420 struct intel_ring *ring = req->ring;
a58c01aa
CW
1421 int ret;
1422
9242f974
CW
1423 if (engine->semaphore.signal) {
1424 ret = engine->semaphore.signal(req);
1425 if (ret)
1426 return ret;
1427 }
1428
1429 ret = intel_ring_begin(req, 8);
a58c01aa
CW
1430 if (ret)
1431 return ret;
1432
b5321f30
CW
1433 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1434 intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1435 PIPE_CONTROL_CS_STALL |
1436 PIPE_CONTROL_QW_WRITE));
1437 intel_ring_emit(ring, intel_hws_seqno_address(engine));
1438 intel_ring_emit(ring, 0);
1439 intel_ring_emit(ring, i915_gem_request_get_seqno(req));
a58c01aa 1440 /* We're thrashing one dword of HWS. */
b5321f30
CW
1441 intel_ring_emit(ring, 0);
1442 intel_ring_emit(ring, MI_USER_INTERRUPT);
1443 intel_ring_emit(ring, MI_NOOP);
ddd66c51 1444 intel_ring_advance(ring);
c5efa1ad
CW
1445
1446 req->tail = ring->tail;
a58c01aa
CW
1447
1448 return 0;
1449}
1450
c8c99b0f
BW
1451/**
1452 * intel_ring_sync - sync the waiter to the signaller on seqno
1453 *
1454 * @waiter - ring that is waiting
1455 * @signaller - ring which has, or will signal
1456 * @seqno - seqno which the waiter will block on
1457 */
5ee426ca
BW
1458
1459static int
ad7bdb2b
CW
1460gen8_ring_sync_to(struct drm_i915_gem_request *req,
1461 struct drm_i915_gem_request *signal)
5ee426ca 1462{
ad7bdb2b
CW
1463 struct intel_ring *ring = req->ring;
1464 struct drm_i915_private *dev_priv = req->i915;
1465 u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
6ef48d7f 1466 struct i915_hw_ppgtt *ppgtt;
5ee426ca
BW
1467 int ret;
1468
ad7bdb2b 1469 ret = intel_ring_begin(req, 4);
5ee426ca
BW
1470 if (ret)
1471 return ret;
1472
ad7bdb2b
CW
1473 intel_ring_emit(ring,
1474 MI_SEMAPHORE_WAIT |
1475 MI_SEMAPHORE_GLOBAL_GTT |
1476 MI_SEMAPHORE_SAD_GTE_SDD);
1477 intel_ring_emit(ring, signal->fence.seqno);
1478 intel_ring_emit(ring, lower_32_bits(offset));
1479 intel_ring_emit(ring, upper_32_bits(offset));
1480 intel_ring_advance(ring);
6ef48d7f
CW
1481
1482 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1483 * pagetables and we must reload them before executing the batch.
1484 * We do this on the i915_switch_context() following the wait and
1485 * before the dispatch.
1486 */
ad7bdb2b
CW
1487 ppgtt = req->ctx->ppgtt;
1488 if (ppgtt && req->engine->id != RCS)
1489 ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
5ee426ca
BW
1490 return 0;
1491}
1492
c8c99b0f 1493static int
ad7bdb2b
CW
1494gen6_ring_sync_to(struct drm_i915_gem_request *req,
1495 struct drm_i915_gem_request *signal)
1ec14ad3 1496{
ad7bdb2b 1497 struct intel_ring *ring = req->ring;
c8c99b0f
BW
1498 u32 dw1 = MI_SEMAPHORE_MBOX |
1499 MI_SEMAPHORE_COMPARE |
1500 MI_SEMAPHORE_REGISTER;
318f89ca 1501 u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
ebc348b2 1502 int ret;
1ec14ad3 1503
ebc348b2 1504 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
686cb5f9 1505
ad7bdb2b 1506 ret = intel_ring_begin(req, 4);
1ec14ad3
CW
1507 if (ret)
1508 return ret;
1509
ad7bdb2b 1510 intel_ring_emit(ring, dw1 | wait_mbox);
ddf07be7
CW
1511 /* Throughout all of the GEM code, seqno passed implies our current
1512 * seqno is >= the last seqno executed. However for hardware the
1513 * comparison is strictly greater than.
1514 */
ad7bdb2b
CW
1515 intel_ring_emit(ring, signal->fence.seqno - 1);
1516 intel_ring_emit(ring, 0);
1517 intel_ring_emit(ring, MI_NOOP);
1518 intel_ring_advance(ring);
1ec14ad3
CW
1519
1520 return 0;
1521}
1522
f8973c21 1523static void
38a0f2db 1524gen5_seqno_barrier(struct intel_engine_cs *engine)
c6df541c 1525{
f8973c21
CW
1526 /* MI_STORE are internally buffered by the GPU and not flushed
1527 * either by MI_FLUSH or SyncFlush or any other combination of
1528 * MI commands.
c6df541c 1529 *
f8973c21
CW
1530 * "Only the submission of the store operation is guaranteed.
1531 * The write result will be complete (coherent) some time later
1532 * (this is practically a finite period but there is no guaranteed
1533 * latency)."
1534 *
1535 * Empirically, we observe that we need a delay of at least 75us to
1536 * be sure that the seqno write is visible by the CPU.
c6df541c 1537 */
f8973c21 1538 usleep_range(125, 250);
c6df541c
CW
1539}
1540
c04e0f3b
CW
1541static void
1542gen6_seqno_barrier(struct intel_engine_cs *engine)
4cd53c0c 1543{
c033666a 1544 struct drm_i915_private *dev_priv = engine->i915;
bcbdb6d0 1545
4cd53c0c
SV
1546 /* Workaround to force correct ordering between irq and seqno writes on
1547 * ivb (and maybe also on snb) by reading from a CS register (like
9b9ed309
CW
1548 * ACTHD) before reading the status page.
1549 *
1550 * Note that this effectively stalls the read by the time it takes to
1551 * do a memory transaction, which more or less ensures that the write
1552 * from the GPU has sufficient time to invalidate the CPU cacheline.
1553 * Alternatively we could delay the interrupt from the CS ring to give
1554 * the write time to land, but that would incur a delay after every
1555 * batch i.e. much more frequent than a delay when waiting for the
1556 * interrupt (with the same net latency).
bcbdb6d0
CW
1557 *
1558 * Also note that to prevent whole machine hangs on gen7, we have to
1559 * take the spinlock to guard against concurrent cacheline access.
9b9ed309 1560 */
bcbdb6d0 1561 spin_lock_irq(&dev_priv->uncore.lock);
c04e0f3b 1562 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
bcbdb6d0 1563 spin_unlock_irq(&dev_priv->uncore.lock);
4cd53c0c
SV
1564}
1565
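/*
 * Annotation: the *_irq_enable()/*_irq_disable() pairs that follow are all
 * variations on the same theme: set or clear engine->irq_enable_mask in
 * whichever interrupt mask register the generation uses (roughly: the shared
 * GT mask on gen5/gen6, the legacy IMR on gen2-gen4, the per-engine RING_IMR
 * on gen8, and the PM mask for the Haswell VEBOX).
 */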
31bb59cc
CW
1566static void
1567gen5_irq_enable(struct intel_engine_cs *engine)
e48d8634 1568{
31bb59cc 1569 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
e48d8634
SV
1570}
1571
1572static void
31bb59cc 1573gen5_irq_disable(struct intel_engine_cs *engine)
e48d8634 1574{
31bb59cc 1575 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
e48d8634
SV
1576}
1577
31bb59cc
CW
1578static void
1579i9xx_irq_enable(struct intel_engine_cs *engine)
62fdfeaf 1580{
c033666a 1581 struct drm_i915_private *dev_priv = engine->i915;
b13c2b96 1582
31bb59cc
CW
1583 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1584 I915_WRITE(IMR, dev_priv->irq_mask);
1585 POSTING_READ_FW(RING_IMR(engine->mmio_base));
62fdfeaf
EA
1586}
1587
8187a2b7 1588static void
31bb59cc 1589i9xx_irq_disable(struct intel_engine_cs *engine)
62fdfeaf 1590{
c033666a 1591 struct drm_i915_private *dev_priv = engine->i915;
62fdfeaf 1592
31bb59cc
CW
1593 dev_priv->irq_mask |= engine->irq_enable_mask;
1594 I915_WRITE(IMR, dev_priv->irq_mask);
62fdfeaf
EA
1595}
1596
31bb59cc
CW
1597static void
1598i8xx_irq_enable(struct intel_engine_cs *engine)
c2798b19 1599{
c033666a 1600 struct drm_i915_private *dev_priv = engine->i915;
c2798b19 1601
31bb59cc
CW
1602 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1603 I915_WRITE16(IMR, dev_priv->irq_mask);
1604 POSTING_READ16(RING_IMR(engine->mmio_base));
c2798b19
CW
1605}
1606
1607static void
31bb59cc 1608i8xx_irq_disable(struct intel_engine_cs *engine)
c2798b19 1609{
c033666a 1610 struct drm_i915_private *dev_priv = engine->i915;
c2798b19 1611
31bb59cc
CW
1612 dev_priv->irq_mask |= engine->irq_enable_mask;
1613 I915_WRITE16(IMR, dev_priv->irq_mask);
c2798b19
CW
1614}
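The i9xx/i8xx IRQ helpers above follow the interrupt mask register convention that a set bit masks (disables) a source: enabling clears the engine's bit, disabling sets it. A tiny sketch of that bookkeeping, with the register write elided and a made-up engine bit:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t irq_mask = ~0u;		/* everything masked */
	const uint32_t engine_bit = 1u << 1;	/* hypothetical irq_enable_mask */

	irq_mask &= ~engine_bit;		/* irq_enable: unmask the engine */
	assert(!(irq_mask & engine_bit));

	irq_mask |= engine_bit;			/* irq_disable: mask it again */
	assert(irq_mask & engine_bit);
	return 0;
}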
1615
b72f3acb 1616static int
7c9cf4e3 1617bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
d1b851fc 1618{
7e37f889 1619 struct intel_ring *ring = req->ring;
b72f3acb
CW
1620 int ret;
1621
5fb9de1a 1622 ret = intel_ring_begin(req, 2);
b72f3acb
CW
1623 if (ret)
1624 return ret;
1625
b5321f30
CW
1626 intel_ring_emit(ring, MI_FLUSH);
1627 intel_ring_emit(ring, MI_NOOP);
1628 intel_ring_advance(ring);
b72f3acb 1629 return 0;
d1b851fc
ZN
1630}
1631
31bb59cc
CW
1632static void
1633gen6_irq_enable(struct intel_engine_cs *engine)
0f46832f 1634{
c033666a 1635 struct drm_i915_private *dev_priv = engine->i915;
0f46832f 1636
61ff75ac
CW
1637 I915_WRITE_IMR(engine,
1638 ~(engine->irq_enable_mask |
1639 engine->irq_keep_mask));
31bb59cc 1640 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
0f46832f
CW
1641}
1642
1643static void
31bb59cc 1644gen6_irq_disable(struct intel_engine_cs *engine)
0f46832f 1645{
c033666a 1646 struct drm_i915_private *dev_priv = engine->i915;
0f46832f 1647
61ff75ac 1648 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
31bb59cc 1649 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
d1b851fc
ZN
1650}
1651
31bb59cc
CW
1652static void
1653hsw_vebox_irq_enable(struct intel_engine_cs *engine)
a19d2933 1654{
c033666a 1655 struct drm_i915_private *dev_priv = engine->i915;
a19d2933 1656
31bb59cc
CW
1657 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1658 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
a19d2933
BW
1659}
1660
1661static void
31bb59cc 1662hsw_vebox_irq_disable(struct intel_engine_cs *engine)
a19d2933 1663{
c033666a 1664 struct drm_i915_private *dev_priv = engine->i915;
a19d2933 1665
31bb59cc
CW
1666 I915_WRITE_IMR(engine, ~0);
1667 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
a19d2933
BW
1668}
1669
31bb59cc
CW
1670static void
1671gen8_irq_enable(struct intel_engine_cs *engine)
abd58f01 1672{
c033666a 1673 struct drm_i915_private *dev_priv = engine->i915;
abd58f01 1674
61ff75ac
CW
1675 I915_WRITE_IMR(engine,
1676 ~(engine->irq_enable_mask |
1677 engine->irq_keep_mask));
31bb59cc 1678 POSTING_READ_FW(RING_IMR(engine->mmio_base));
abd58f01
BW
1679}
1680
1681static void
31bb59cc 1682gen8_irq_disable(struct intel_engine_cs *engine)
abd58f01 1683{
c033666a 1684 struct drm_i915_private *dev_priv = engine->i915;
abd58f01 1685
61ff75ac 1686 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
abd58f01
BW
1687}
1688
d1b851fc 1689static int
803688ba
CW
1690i965_emit_bb_start(struct drm_i915_gem_request *req,
1691 u64 offset, u32 length,
1692 unsigned int dispatch_flags)
d1b851fc 1693{
7e37f889 1694 struct intel_ring *ring = req->ring;
e1f99ce6 1695 int ret;
78501eac 1696
5fb9de1a 1697 ret = intel_ring_begin(req, 2);
e1f99ce6
CW
1698 if (ret)
1699 return ret;
1700
b5321f30 1701 intel_ring_emit(ring,
65f56876
CW
1702 MI_BATCH_BUFFER_START |
1703 MI_BATCH_GTT |
8e004efc
JH
1704 (dispatch_flags & I915_DISPATCH_SECURE ?
1705 0 : MI_BATCH_NON_SECURE_I965));
b5321f30
CW
1706 intel_ring_emit(ring, offset);
1707 intel_ring_advance(ring);
78501eac 1708
d1b851fc
ZN
1709 return 0;
1710}
1711
b45305fc
SV
1712/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1713#define I830_BATCH_LIMIT (256*1024)
c4d69da1
CW
1714#define I830_TLB_ENTRIES (2)
1715#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
8187a2b7 1716static int
803688ba
CW
1717i830_emit_bb_start(struct drm_i915_gem_request *req,
1718 u64 offset, u32 len,
1719 unsigned int dispatch_flags)
62fdfeaf 1720{
7e37f889 1721 struct intel_ring *ring = req->ring;
bde13ebd 1722 u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
c4e7a414 1723 int ret;
62fdfeaf 1724
5fb9de1a 1725 ret = intel_ring_begin(req, 6);
c4d69da1
CW
1726 if (ret)
1727 return ret;
62fdfeaf 1728
c4d69da1 1729 /* Evict the invalid PTE TLBs */
b5321f30
CW
1730 intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
1731 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
1732 intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
1733 intel_ring_emit(ring, cs_offset);
1734 intel_ring_emit(ring, 0xdeadbeef);
1735 intel_ring_emit(ring, MI_NOOP);
1736 intel_ring_advance(ring);
b45305fc 1737
8e004efc 1738 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
b45305fc
SV
1739 if (len > I830_BATCH_LIMIT)
1740 return -ENOSPC;
1741
5fb9de1a 1742 ret = intel_ring_begin(req, 6 + 2);
b45305fc
SV
1743 if (ret)
1744 return ret;
c4d69da1
CW
1745
 1746 /* Blit the batch (which now has all relocs applied) to the
1747 * stable batch scratch bo area (so that the CS never
1748 * stumbles over its tlb invalidation bug) ...
1749 */
b5321f30
CW
1750 intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
1751 intel_ring_emit(ring,
e2f80391 1752 BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
b5321f30
CW
1753 intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
1754 intel_ring_emit(ring, cs_offset);
1755 intel_ring_emit(ring, 4096);
1756 intel_ring_emit(ring, offset);
e2f80391 1757
b5321f30
CW
1758 intel_ring_emit(ring, MI_FLUSH);
1759 intel_ring_emit(ring, MI_NOOP);
1760 intel_ring_advance(ring);
b45305fc
SV
1761
1762 /* ... and execute it. */
c4d69da1 1763 offset = cs_offset;
b45305fc 1764 }
e1f99ce6 1765
9d611c03 1766 ret = intel_ring_begin(req, 2);
c4d69da1
CW
1767 if (ret)
1768 return ret;
1769
b5321f30
CW
1770 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1771 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1772 0 : MI_BATCH_NON_SECURE));
1773 intel_ring_advance(ring);
c4d69da1 1774
fb3256da
SV
1775 return 0;
1776}
1777
1778static int
803688ba
CW
1779i915_emit_bb_start(struct drm_i915_gem_request *req,
1780 u64 offset, u32 len,
1781 unsigned int dispatch_flags)
fb3256da 1782{
7e37f889 1783 struct intel_ring *ring = req->ring;
fb3256da
SV
1784 int ret;
1785
5fb9de1a 1786 ret = intel_ring_begin(req, 2);
fb3256da
SV
1787 if (ret)
1788 return ret;
1789
b5321f30
CW
1790 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1791 intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
1792 0 : MI_BATCH_NON_SECURE));
1793 intel_ring_advance(ring);
62fdfeaf 1794
62fdfeaf
EA
1795 return 0;
1796}
1797
0bc40be8 1798static void cleanup_phys_status_page(struct intel_engine_cs *engine)
7d3fdfff 1799{
c033666a 1800 struct drm_i915_private *dev_priv = engine->i915;
7d3fdfff
VS
1801
1802 if (!dev_priv->status_page_dmah)
1803 return;
1804
91c8a326 1805 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
0bc40be8 1806 engine->status_page.page_addr = NULL;
7d3fdfff
VS
1807}
1808
0bc40be8 1809static void cleanup_status_page(struct intel_engine_cs *engine)
62fdfeaf 1810{
57e88531 1811 struct i915_vma *vma;
62fdfeaf 1812
57e88531
CW
1813 vma = fetch_and_zero(&engine->status_page.vma);
1814 if (!vma)
62fdfeaf 1815 return;
62fdfeaf 1816
57e88531
CW
1817 i915_vma_unpin(vma);
1818 i915_gem_object_unpin_map(vma->obj);
1819 i915_vma_put(vma);
62fdfeaf
EA
1820}
1821
0bc40be8 1822static int init_status_page(struct intel_engine_cs *engine)
62fdfeaf 1823{
57e88531
CW
1824 struct drm_i915_gem_object *obj;
1825 struct i915_vma *vma;
1826 unsigned int flags;
1827 int ret;
e4ffd173 1828
57e88531
CW
1829 obj = i915_gem_object_create(&engine->i915->drm, 4096);
1830 if (IS_ERR(obj)) {
1831 DRM_ERROR("Failed to allocate status page\n");
1832 return PTR_ERR(obj);
1833 }
62fdfeaf 1834
57e88531
CW
1835 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
1836 if (ret)
1837 goto err;
e3efda49 1838
57e88531
CW
1839 vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
1840 if (IS_ERR(vma)) {
1841 ret = PTR_ERR(vma);
1842 goto err;
e3efda49 1843 }
62fdfeaf 1844
57e88531
CW
1845 flags = PIN_GLOBAL;
1846 if (!HAS_LLC(engine->i915))
1847 /* On g33, we cannot place HWS above 256MiB, so
1848 * restrict its pinning to the low mappable arena.
1849 * Though this restriction is not documented for
1850 * gen4, gen5, or byt, they also behave similarly
1851 * and hang if the HWS is placed at the top of the
1852 * GTT. To generalise, it appears that all !llc
1853 * platforms have issues with us placing the HWS
1854 * above the mappable region (even though we never
 1855 * actually map it).
1856 */
1857 flags |= PIN_MAPPABLE;
1858 ret = i915_vma_pin(vma, 0, 4096, flags);
1859 if (ret)
1860 goto err;
62fdfeaf 1861
57e88531 1862 engine->status_page.vma = vma;
bde13ebd 1863 engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
57e88531
CW
1864 engine->status_page.page_addr =
1865 i915_gem_object_pin_map(obj, I915_MAP_WB);
62fdfeaf 1866
bde13ebd
CW
1867 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1868 engine->name, i915_ggtt_offset(vma));
62fdfeaf 1869 return 0;
57e88531
CW
1870
1871err:
1872 i915_gem_object_put(obj);
1873 return ret;
62fdfeaf
EA
1874}
1875
0bc40be8 1876static int init_phys_status_page(struct intel_engine_cs *engine)
6b8294a4 1877{
c033666a 1878 struct drm_i915_private *dev_priv = engine->i915;
6b8294a4 1879
57e88531
CW
1880 dev_priv->status_page_dmah =
1881 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
1882 if (!dev_priv->status_page_dmah)
1883 return -ENOMEM;
6b8294a4 1884
0bc40be8
TU
1885 engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1886 memset(engine->status_page.page_addr, 0, PAGE_SIZE);
6b8294a4
CW
1887
1888 return 0;
1889}
1890
aad29fbb 1891int intel_ring_pin(struct intel_ring *ring)
7ba717cf 1892{
a687a43a 1893 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
57e88531 1894 unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
9d80841e 1895 enum i915_map_type map;
57e88531 1896 struct i915_vma *vma = ring->vma;
8305216f 1897 void *addr;
7ba717cf
TD
1898 int ret;
1899
57e88531 1900 GEM_BUG_ON(ring->vaddr);
7ba717cf 1901
9d80841e
CW
1902 map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
1903
1904 if (vma->obj->stolen)
57e88531 1905 flags |= PIN_MAPPABLE;
def0c5f6 1906
57e88531 1907 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
9d80841e 1908 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
57e88531
CW
1909 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1910 else
1911 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1912 if (unlikely(ret))
def0c5f6 1913 return ret;
57e88531 1914 }
7ba717cf 1915
57e88531
CW
1916 ret = i915_vma_pin(vma, 0, PAGE_SIZE, flags);
1917 if (unlikely(ret))
1918 return ret;
def0c5f6 1919
9d80841e 1920 if (i915_vma_is_map_and_fenceable(vma))
57e88531
CW
1921 addr = (void __force *)i915_vma_pin_iomap(vma);
1922 else
9d80841e 1923 addr = i915_gem_object_pin_map(vma->obj, map);
57e88531
CW
1924 if (IS_ERR(addr))
1925 goto err;
7ba717cf 1926
32c04f16 1927 ring->vaddr = addr;
7ba717cf 1928 return 0;
d2cad535 1929
57e88531
CW
1930err:
1931 i915_vma_unpin(vma);
1932 return PTR_ERR(addr);
7ba717cf
TD
1933}
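A compact restatement of the mapping choice intel_ring_pin() makes above: a map-and-fenceable VMA is accessed through the GTT aperture, otherwise the backing object is mapped directly, write-back on LLC platforms and write-combined elsewhere. The names below are illustrative only, not driver API:

#include <assert.h>

enum ring_map { MAP_GTT_IOMAP, MAP_WB_CPU, MAP_WC_CPU };

static enum ring_map choose_ring_map(int has_llc, int map_and_fenceable)
{
	if (map_and_fenceable)
		return MAP_GTT_IOMAP;			/* i915_vma_pin_iomap() path */
	return has_llc ? MAP_WB_CPU : MAP_WC_CPU;	/* i915_gem_object_pin_map() path */
}

int main(void)
{
	assert(choose_ring_map(1, 0) == MAP_WB_CPU);
	assert(choose_ring_map(0, 0) == MAP_WC_CPU);
	assert(choose_ring_map(0, 1) == MAP_GTT_IOMAP);
	return 0;
}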
1934
aad29fbb
CW
1935void intel_ring_unpin(struct intel_ring *ring)
1936{
1937 GEM_BUG_ON(!ring->vma);
1938 GEM_BUG_ON(!ring->vaddr);
1939
9d80841e 1940 if (i915_vma_is_map_and_fenceable(ring->vma))
aad29fbb 1941 i915_vma_unpin_iomap(ring->vma);
57e88531
CW
1942 else
1943 i915_gem_object_unpin_map(ring->vma->obj);
aad29fbb
CW
1944 ring->vaddr = NULL;
1945
57e88531 1946 i915_vma_unpin(ring->vma);
2919d291
OM
1947}
1948
57e88531
CW
1949static struct i915_vma *
1950intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
62fdfeaf 1951{
05394f39 1952 struct drm_i915_gem_object *obj;
57e88531 1953 struct i915_vma *vma;
62fdfeaf 1954
c58b735f
CW
1955 obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
1956 if (!obj)
57e88531
CW
1957 obj = i915_gem_object_create(&dev_priv->drm, size);
1958 if (IS_ERR(obj))
1959 return ERR_CAST(obj);
8187a2b7 1960
24f3a8cf
AG
1961 /* mark ring buffers as read-only from GPU side by default */
1962 obj->gt_ro = 1;
1963
57e88531
CW
1964 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
1965 if (IS_ERR(vma))
1966 goto err;
1967
1968 return vma;
e3efda49 1969
57e88531
CW
1970err:
1971 i915_gem_object_put(obj);
1972 return vma;
e3efda49
CW
1973}
1974
7e37f889
CW
1975struct intel_ring *
1976intel_engine_create_ring(struct intel_engine_cs *engine, int size)
01101fa7 1977{
7e37f889 1978 struct intel_ring *ring;
57e88531 1979 struct i915_vma *vma;
01101fa7 1980
8f942018
CW
1981 GEM_BUG_ON(!is_power_of_2(size));
1982
01101fa7 1983 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
57e88531 1984 if (!ring)
01101fa7
CW
1985 return ERR_PTR(-ENOMEM);
1986
4a570db5 1987 ring->engine = engine;
01101fa7 1988
675d9ad7
CW
1989 INIT_LIST_HEAD(&ring->request_list);
1990
01101fa7
CW
1991 ring->size = size;
1992 /* Workaround an erratum on the i830 which causes a hang if
1993 * the TAIL pointer points to within the last 2 cachelines
1994 * of the buffer.
1995 */
1996 ring->effective_size = size;
c033666a 1997 if (IS_I830(engine->i915) || IS_845G(engine->i915))
01101fa7
CW
1998 ring->effective_size -= 2 * CACHELINE_BYTES;
1999
2000 ring->last_retired_head = -1;
2001 intel_ring_update_space(ring);
2002
57e88531
CW
2003 vma = intel_ring_create_vma(engine->i915, size);
2004 if (IS_ERR(vma)) {
01101fa7 2005 kfree(ring);
57e88531 2006 return ERR_CAST(vma);
01101fa7 2007 }
57e88531 2008 ring->vma = vma;
01101fa7
CW
2009
2010 return ring;
2011}
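A worked example of the i830/845 TAIL erratum handling above, assuming the 32-page ring that intel_init_ring_buffer() requests and the driver's 64-byte CACHELINE_BYTES:

#include <assert.h>

int main(void)
{
	int size = 32 * 4096;			/* 131072-byte ring */
	int effective_size = size;
	int is_i830_or_845 = 1;

	if (is_i830_or_845)
		effective_size -= 2 * 64;	/* keep TAIL out of the last 2 cachelines */

	assert(effective_size == 130944);
	return 0;
}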
2012
2013void
7e37f889 2014intel_ring_free(struct intel_ring *ring)
01101fa7 2015{
57e88531 2016 i915_vma_put(ring->vma);
01101fa7
CW
2017 kfree(ring);
2018}
2019
0cb26a8e
CW
2020static int intel_ring_context_pin(struct i915_gem_context *ctx,
2021 struct intel_engine_cs *engine)
2022{
2023 struct intel_context *ce = &ctx->engine[engine->id];
2024 int ret;
2025
91c8a326 2026 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
0cb26a8e
CW
2027
2028 if (ce->pin_count++)
2029 return 0;
2030
2031 if (ce->state) {
7abc98fa
CW
2032 ret = i915_gem_object_set_to_gtt_domain(ce->state->obj, false);
2033 if (ret)
2034 goto error;
2035
bf3783e5
CW
2036 ret = i915_vma_pin(ce->state, 0, ctx->ggtt_alignment,
2037 PIN_GLOBAL | PIN_HIGH);
0cb26a8e
CW
2038 if (ret)
2039 goto error;
2040 }
2041
c7c3c07d
CW
2042 /* The kernel context is only used as a placeholder for flushing the
2043 * active context. It is never used for submitting user rendering and
2044 * as such never requires the golden render context, and so we can skip
2045 * emitting it when we switch to the kernel context. This is required
2046 * as during eviction we cannot allocate and pin the renderstate in
2047 * order to initialise the context.
2048 */
2049 if (ctx == ctx->i915->kernel_context)
2050 ce->initialised = true;
2051
9a6feaf0 2052 i915_gem_context_get(ctx);
0cb26a8e
CW
2053 return 0;
2054
2055error:
2056 ce->pin_count = 0;
2057 return ret;
2058}
2059
2060static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2061 struct intel_engine_cs *engine)
2062{
2063 struct intel_context *ce = &ctx->engine[engine->id];
2064
91c8a326 2065 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
0cb26a8e
CW
2066
2067 if (--ce->pin_count)
2068 return;
2069
2070 if (ce->state)
bf3783e5 2071 i915_vma_unpin(ce->state);
0cb26a8e 2072
9a6feaf0 2073 i915_gem_context_put(ctx);
0cb26a8e
CW
2074}
2075
acd27845 2076static int intel_init_ring_buffer(struct intel_engine_cs *engine)
e3efda49 2077{
acd27845 2078 struct drm_i915_private *dev_priv = engine->i915;
32c04f16 2079 struct intel_ring *ring;
e3efda49
CW
2080 int ret;
2081
0bc40be8 2082 WARN_ON(engine->buffer);
bfc882b4 2083
019bf277
TU
2084 intel_engine_setup_common(engine);
2085
0bc40be8
TU
2086 memset(engine->semaphore.sync_seqno, 0,
2087 sizeof(engine->semaphore.sync_seqno));
e3efda49 2088
019bf277 2089 ret = intel_engine_init_common(engine);
688e6c72
CW
2090 if (ret)
2091 goto error;
e3efda49 2092
0cb26a8e
CW
2093 /* We may need to do things with the shrinker which
2094 * require us to immediately switch back to the default
2095 * context. This can cause a problem as pinning the
2096 * default context also requires GTT space which may not
2097 * be available. To avoid this we always pin the default
2098 * context.
2099 */
2100 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2101 if (ret)
2102 goto error;
2103
32c04f16
CW
2104 ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
2105 if (IS_ERR(ring)) {
2106 ret = PTR_ERR(ring);
b0366a54
DG
2107 goto error;
2108 }
01101fa7 2109
3177659a
CS
2110 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
2111 WARN_ON(engine->id != RCS);
2112 ret = init_phys_status_page(engine);
e3efda49 2113 if (ret)
8ee14975 2114 goto error;
e3efda49 2115 } else {
3177659a 2116 ret = init_status_page(engine);
e3efda49 2117 if (ret)
8ee14975 2118 goto error;
e3efda49
CW
2119 }
2120
aad29fbb 2121 ret = intel_ring_pin(ring);
bfc882b4 2122 if (ret) {
57e88531 2123 intel_ring_free(ring);
bfc882b4 2124 goto error;
e3efda49 2125 }
57e88531 2126 engine->buffer = ring;
62fdfeaf 2127
8ee14975 2128 return 0;
351e3db2 2129
8ee14975 2130error:
7e37f889 2131 intel_engine_cleanup(engine);
8ee14975 2132 return ret;
62fdfeaf
EA
2133}
2134
7e37f889 2135void intel_engine_cleanup(struct intel_engine_cs *engine)
62fdfeaf 2136{
6402c330 2137 struct drm_i915_private *dev_priv;
33626e6a 2138
117897f4 2139 if (!intel_engine_initialized(engine))
62fdfeaf
EA
2140 return;
2141
c033666a 2142 dev_priv = engine->i915;
6402c330 2143
0bc40be8 2144 if (engine->buffer) {
21a2c58a
CW
2145 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
2146 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
33626e6a 2147
aad29fbb 2148 intel_ring_unpin(engine->buffer);
7e37f889 2149 intel_ring_free(engine->buffer);
0bc40be8 2150 engine->buffer = NULL;
b0366a54 2151 }
78501eac 2152
0bc40be8
TU
2153 if (engine->cleanup)
2154 engine->cleanup(engine);
8d19215b 2155
3177659a 2156 if (HWS_NEEDS_PHYSICAL(dev_priv)) {
0bc40be8
TU
2157 WARN_ON(engine->id != RCS);
2158 cleanup_phys_status_page(engine);
3177659a
CS
2159 } else {
2160 cleanup_status_page(engine);
7d3fdfff 2161 }
44e895a8 2162
96a945aa 2163 intel_engine_cleanup_common(engine);
0cb26a8e
CW
2164
2165 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2166
c033666a 2167 engine->i915 = NULL;
62fdfeaf
EA
2168}
2169
821ed7df
CW
2170void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
2171{
2172 struct intel_engine_cs *engine;
2173
2174 for_each_engine(engine, dev_priv) {
2175 engine->buffer->head = engine->buffer->tail;
2176 engine->buffer->last_retired_head = -1;
2177 }
2178}
2179
6689cb2b 2180int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
9d773091 2181{
6310346e
CW
2182 int ret;
2183
2184 /* Flush enough space to reduce the likelihood of waiting after
2185 * we start building the request - in which case we will just
2186 * have to repeat work.
2187 */
a0442461 2188 request->reserved_space += LEGACY_REQUEST_SIZE;
6310346e 2189
1dae2dfb 2190 request->ring = request->engine->buffer;
6310346e
CW
2191
2192 ret = intel_ring_begin(request, 0);
2193 if (ret)
2194 return ret;
2195
a0442461 2196 request->reserved_space -= LEGACY_REQUEST_SIZE;
6310346e 2197 return 0;
9d773091
CW
2198}
2199
987046ad
CW
2200static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2201{
7e37f889 2202 struct intel_ring *ring = req->ring;
987046ad 2203 struct drm_i915_gem_request *target;
7da844c5 2204 int ret;
987046ad 2205
1dae2dfb
CW
2206 intel_ring_update_space(ring);
2207 if (ring->space >= bytes)
987046ad
CW
2208 return 0;
2209
2210 /*
2211 * Space is reserved in the ringbuffer for finalising the request,
2212 * as that cannot be allowed to fail. During request finalisation,
2213 * reserved_space is set to 0 to stop the overallocation and the
2214 * assumption is that then we never need to wait (which has the
2215 * risk of failing with EINTR).
2216 *
2217 * See also i915_gem_request_alloc() and i915_add_request().
2218 */
0251a963 2219 GEM_BUG_ON(!req->reserved_space);
987046ad 2220
675d9ad7 2221 list_for_each_entry(target, &ring->request_list, ring_link) {
987046ad
CW
2222 unsigned space;
2223
987046ad 2224 /* Would completion of this request free enough space? */
1dae2dfb
CW
2225 space = __intel_ring_space(target->postfix, ring->tail,
2226 ring->size);
987046ad
CW
2227 if (space >= bytes)
2228 break;
79bbcc29 2229 }
29b1b415 2230
675d9ad7 2231 if (WARN_ON(&target->ring_link == &ring->request_list))
987046ad
CW
2232 return -ENOSPC;
2233
22dd3bb9
CW
2234 ret = i915_wait_request(target,
2235 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
ea746f36 2236 NULL, NO_WAITBOOST);
7da844c5
CW
2237 if (ret)
2238 return ret;
2239
7da844c5
CW
2240 i915_gem_request_retire_upto(target);
2241
2242 intel_ring_update_space(ring);
2243 GEM_BUG_ON(ring->space < bytes);
2244 return 0;
29b1b415
JH
2245}
2246
987046ad 2247int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
cbcc80df 2248{
7e37f889 2249 struct intel_ring *ring = req->ring;
1dae2dfb
CW
2250 int remain_actual = ring->size - ring->tail;
2251 int remain_usable = ring->effective_size - ring->tail;
987046ad
CW
2252 int bytes = num_dwords * sizeof(u32);
2253 int total_bytes, wait_bytes;
79bbcc29 2254 bool need_wrap = false;
29b1b415 2255
0251a963 2256 total_bytes = bytes + req->reserved_space;
29b1b415 2257
79bbcc29
JH
2258 if (unlikely(bytes > remain_usable)) {
2259 /*
2260 * Not enough space for the basic request. So need to flush
2261 * out the remainder and then wait for base + reserved.
2262 */
2263 wait_bytes = remain_actual + total_bytes;
2264 need_wrap = true;
987046ad
CW
2265 } else if (unlikely(total_bytes > remain_usable)) {
2266 /*
2267 * The base request will fit but the reserved space
2268 * falls off the end. So we don't need an immediate wrap
2269 * and only need to effectively wait for the reserved
2270 * size space from the start of ringbuffer.
2271 */
0251a963 2272 wait_bytes = remain_actual + req->reserved_space;
79bbcc29 2273 } else {
987046ad
CW
2274 /* No wrapping required, just waiting. */
2275 wait_bytes = total_bytes;
cbcc80df
MK
2276 }
2277
1dae2dfb 2278 if (wait_bytes > ring->space) {
987046ad 2279 int ret = wait_for_space(req, wait_bytes);
cbcc80df
MK
2280 if (unlikely(ret))
2281 return ret;
2282 }
2283
987046ad 2284 if (unlikely(need_wrap)) {
1dae2dfb
CW
2285 GEM_BUG_ON(remain_actual > ring->space);
2286 GEM_BUG_ON(ring->tail + remain_actual > ring->size);
78501eac 2287
987046ad 2288 /* Fill the tail with MI_NOOP */
1dae2dfb
CW
2289 memset(ring->vaddr + ring->tail, 0, remain_actual);
2290 ring->tail = 0;
2291 ring->space -= remain_actual;
987046ad 2292 }
304d695c 2293
1dae2dfb
CW
2294 ring->space -= bytes;
2295 GEM_BUG_ON(ring->space < 0);
304d695c 2296 return 0;
8187a2b7 2297}
78501eac 2298
753b1ad4 2299/* Align the ring tail to a cacheline boundary */
bba09b12 2300int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
753b1ad4 2301{
7e37f889 2302 struct intel_ring *ring = req->ring;
b5321f30
CW
2303 int num_dwords =
2304 (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
753b1ad4
VS
2305 int ret;
2306
2307 if (num_dwords == 0)
2308 return 0;
2309
18393f63 2310 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
5fb9de1a 2311 ret = intel_ring_begin(req, num_dwords);
753b1ad4
VS
2312 if (ret)
2313 return ret;
2314
2315 while (num_dwords--)
b5321f30 2316 intel_ring_emit(ring, MI_NOOP);
753b1ad4 2317
b5321f30 2318 intel_ring_advance(ring);
753b1ad4
VS
2319
2320 return 0;
2321}
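A worked example of the padding computed by intel_ring_cacheline_align() above, assuming the driver's 64-byte CACHELINE_BYTES (16 dwords per cacheline):

#include <assert.h>

int main(void)
{
	int tail = 0x218;			/* 536: 24 bytes into a cacheline */
	int num_dwords = (tail & (64 - 1)) / 4;	/* 6 dwords already emitted */

	if (num_dwords != 0)
		num_dwords = 64 / 4 - num_dwords;	/* 10 MI_NOOPs to pad */

	assert(num_dwords == 10);
	return 0;
}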
2322
c5efa1ad 2323static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
881f47b6 2324{
c5efa1ad 2325 struct drm_i915_private *dev_priv = request->i915;
881f47b6 2326
76f8421f
CW
2327 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2328
881f47b6 2329 /* Every tail move must follow the sequence below */
12f55818
CW
2330
2331 /* Disable notification that the ring is IDLE. The GT
2332 * will then assume that it is busy and bring it out of rc6.
2333 */
76f8421f
CW
2334 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2335 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
12f55818
CW
2336
2337 /* Clear the context id. Here be magic! */
76f8421f 2338 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
0206e353 2339
12f55818 2340 /* Wait for the ring not to be idle, i.e. for it to wake up. */
76f8421f
CW
2341 if (intel_wait_for_register_fw(dev_priv,
2342 GEN6_BSD_SLEEP_PSMI_CONTROL,
2343 GEN6_BSD_SLEEP_INDICATOR,
2344 0,
2345 50))
12f55818 2346 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
0206e353 2347
12f55818 2348 /* Now that the ring is fully powered up, update the tail */
b0411e7d 2349 i9xx_submit_request(request);
12f55818
CW
2350
2351 /* Let the ring send IDLE messages to the GT again,
2352 * and so let it sleep to conserve power when idle.
2353 */
76f8421f
CW
2354 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2355 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2356
2357 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
881f47b6
XH
2358}
2359
7c9cf4e3 2360static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
881f47b6 2361{
7e37f889 2362 struct intel_ring *ring = req->ring;
71a77e07 2363 uint32_t cmd;
b72f3acb
CW
2364 int ret;
2365
5fb9de1a 2366 ret = intel_ring_begin(req, 4);
b72f3acb
CW
2367 if (ret)
2368 return ret;
2369
71a77e07 2370 cmd = MI_FLUSH_DW;
c033666a 2371 if (INTEL_GEN(req->i915) >= 8)
075b3bba 2372 cmd += 1;
f0a1fb10
CW
2373
2374 /* We always require a command barrier so that subsequent
2375 * commands, such as breadcrumb interrupts, are strictly ordered
2376 * wrt the contents of the write cache being flushed to memory
2377 * (and thus being coherent from the CPU).
2378 */
2379 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2380
9a289771
JB
2381 /*
2382 * Bspec vol 1c.5 - video engine command streamer:
2383 * "If ENABLED, all TLBs will be invalidated once the flush
2384 * operation is complete. This bit is only valid when the
2385 * Post-Sync Operation field is a value of 1h or 3h."
2386 */
7c9cf4e3 2387 if (mode & EMIT_INVALIDATE)
f0a1fb10
CW
2388 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
2389
b5321f30
CW
2390 intel_ring_emit(ring, cmd);
2391 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
c033666a 2392 if (INTEL_GEN(req->i915) >= 8) {
b5321f30
CW
2393 intel_ring_emit(ring, 0); /* upper addr */
2394 intel_ring_emit(ring, 0); /* value */
075b3bba 2395 } else {
b5321f30
CW
2396 intel_ring_emit(ring, 0);
2397 intel_ring_emit(ring, MI_NOOP);
075b3bba 2398 }
b5321f30 2399 intel_ring_advance(ring);
b72f3acb 2400 return 0;
881f47b6
XH
2401}
2402
1c7a0623 2403static int
803688ba
CW
2404gen8_emit_bb_start(struct drm_i915_gem_request *req,
2405 u64 offset, u32 len,
2406 unsigned int dispatch_flags)
1c7a0623 2407{
7e37f889 2408 struct intel_ring *ring = req->ring;
b5321f30 2409 bool ppgtt = USES_PPGTT(req->i915) &&
8e004efc 2410 !(dispatch_flags & I915_DISPATCH_SECURE);
1c7a0623
BW
2411 int ret;
2412
5fb9de1a 2413 ret = intel_ring_begin(req, 4);
1c7a0623
BW
2414 if (ret)
2415 return ret;
2416
2417 /* FIXME(BDW): Address space and security selectors. */
b5321f30 2418 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
919032ec
AJ
2419 (dispatch_flags & I915_DISPATCH_RS ?
2420 MI_BATCH_RESOURCE_STREAMER : 0));
b5321f30
CW
2421 intel_ring_emit(ring, lower_32_bits(offset));
2422 intel_ring_emit(ring, upper_32_bits(offset));
2423 intel_ring_emit(ring, MI_NOOP);
2424 intel_ring_advance(ring);
1c7a0623
BW
2425
2426 return 0;
2427}
2428
d7d4eedd 2429static int
803688ba
CW
2430hsw_emit_bb_start(struct drm_i915_gem_request *req,
2431 u64 offset, u32 len,
2432 unsigned int dispatch_flags)
d7d4eedd 2433{
7e37f889 2434 struct intel_ring *ring = req->ring;
d7d4eedd
CW
2435 int ret;
2436
5fb9de1a 2437 ret = intel_ring_begin(req, 2);
d7d4eedd
CW
2438 if (ret)
2439 return ret;
2440
b5321f30 2441 intel_ring_emit(ring,
77072258 2442 MI_BATCH_BUFFER_START |
8e004efc 2443 (dispatch_flags & I915_DISPATCH_SECURE ?
919032ec
AJ
2444 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
2445 (dispatch_flags & I915_DISPATCH_RS ?
2446 MI_BATCH_RESOURCE_STREAMER : 0));
d7d4eedd 2447 /* bit0-7 is the length on GEN6+ */
b5321f30
CW
2448 intel_ring_emit(ring, offset);
2449 intel_ring_advance(ring);
d7d4eedd
CW
2450
2451 return 0;
2452}
2453
881f47b6 2454static int
803688ba
CW
2455gen6_emit_bb_start(struct drm_i915_gem_request *req,
2456 u64 offset, u32 len,
2457 unsigned int dispatch_flags)
881f47b6 2458{
7e37f889 2459 struct intel_ring *ring = req->ring;
0206e353 2460 int ret;
ab6f8e32 2461
5fb9de1a 2462 ret = intel_ring_begin(req, 2);
0206e353
AJ
2463 if (ret)
2464 return ret;
e1f99ce6 2465
b5321f30 2466 intel_ring_emit(ring,
d7d4eedd 2467 MI_BATCH_BUFFER_START |
8e004efc
JH
2468 (dispatch_flags & I915_DISPATCH_SECURE ?
2469 0 : MI_BATCH_NON_SECURE_I965));
0206e353 2470 /* bit0-7 is the length on GEN6+ */
b5321f30
CW
2471 intel_ring_emit(ring, offset);
2472 intel_ring_advance(ring);
ab6f8e32 2473
0206e353 2474 return 0;
881f47b6
XH
2475}
2476
549f7365
CW
2477/* Blitter support (SandyBridge+) */
2478
7c9cf4e3 2479static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
8d19215b 2480{
7e37f889 2481 struct intel_ring *ring = req->ring;
71a77e07 2482 uint32_t cmd;
b72f3acb
CW
2483 int ret;
2484
5fb9de1a 2485 ret = intel_ring_begin(req, 4);
b72f3acb
CW
2486 if (ret)
2487 return ret;
2488
71a77e07 2489 cmd = MI_FLUSH_DW;
c033666a 2490 if (INTEL_GEN(req->i915) >= 8)
075b3bba 2491 cmd += 1;
f0a1fb10
CW
2492
2493 /* We always require a command barrier so that subsequent
2494 * commands, such as breadcrumb interrupts, are strictly ordered
2495 * wrt the contents of the write cache being flushed to memory
2496 * (and thus being coherent from the CPU).
2497 */
2498 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2499
9a289771
JB
2500 /*
2501 * Bspec vol 1c.3 - blitter engine command streamer:
2502 * "If ENABLED, all TLBs will be invalidated once the flush
2503 * operation is complete. This bit is only valid when the
2504 * Post-Sync Operation field is a value of 1h or 3h."
2505 */
7c9cf4e3 2506 if (mode & EMIT_INVALIDATE)
f0a1fb10 2507 cmd |= MI_INVALIDATE_TLB;
b5321f30
CW
2508 intel_ring_emit(ring, cmd);
2509 intel_ring_emit(ring,
e2f80391 2510 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
c033666a 2511 if (INTEL_GEN(req->i915) >= 8) {
b5321f30
CW
2512 intel_ring_emit(ring, 0); /* upper addr */
2513 intel_ring_emit(ring, 0); /* value */
075b3bba 2514 } else {
b5321f30
CW
2515 intel_ring_emit(ring, 0);
2516 intel_ring_emit(ring, MI_NOOP);
075b3bba 2517 }
b5321f30 2518 intel_ring_advance(ring);
fd3da6c9 2519
b72f3acb 2520 return 0;
8d19215b
ZN
2521}
2522
d9a64610
TU
2523static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2524 struct intel_engine_cs *engine)
2525{
db3d4019 2526 struct drm_i915_gem_object *obj;
1b9e6650 2527 int ret, i;
db3d4019 2528
39df9190 2529 if (!i915.semaphores)
db3d4019
TU
2530 return;
2531
51d545d0
CW
2532 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
2533 struct i915_vma *vma;
2534
91c8a326 2535 obj = i915_gem_object_create(&dev_priv->drm, 4096);
51d545d0
CW
2536 if (IS_ERR(obj))
2537 goto err;
db3d4019 2538
51d545d0
CW
2539 vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
2540 if (IS_ERR(vma))
2541 goto err_obj;
2542
2543 ret = i915_gem_object_set_to_gtt_domain(obj, false);
2544 if (ret)
2545 goto err_obj;
2546
2547 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
2548 if (ret)
2549 goto err_obj;
2550
2551 dev_priv->semaphore = vma;
2552 }
d9a64610
TU
2553
2554 if (INTEL_GEN(dev_priv) >= 8) {
bde13ebd 2555 u32 offset = i915_ggtt_offset(dev_priv->semaphore);
1b9e6650 2556
ad7bdb2b 2557 engine->semaphore.sync_to = gen8_ring_sync_to;
d9a64610 2558 engine->semaphore.signal = gen8_xcs_signal;
1b9e6650
TU
2559
2560 for (i = 0; i < I915_NUM_ENGINES; i++) {
bde13ebd 2561 u32 ring_offset;
1b9e6650
TU
2562
2563 if (i != engine->id)
2564 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2565 else
2566 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2567
2568 engine->semaphore.signal_ggtt[i] = ring_offset;
2569 }
d9a64610 2570 } else if (INTEL_GEN(dev_priv) >= 6) {
ad7bdb2b 2571 engine->semaphore.sync_to = gen6_ring_sync_to;
d9a64610 2572 engine->semaphore.signal = gen6_signal;
4b8e38a9
TU
2573
2574 /*
2575 * The current semaphore is only applied on pre-gen8
2576 * platform. And there is no VCS2 ring on the pre-gen8
2577 * platform. So the semaphore between RCS and VCS2 is
2578 * initialized as INVALID. Gen8 will initialize the
2579 * sema between VCS2 and RCS later.
2580 */
318f89ca 2581 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
4b8e38a9
TU
2582 static const struct {
2583 u32 wait_mbox;
2584 i915_reg_t mbox_reg;
318f89ca
TU
2585 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2586 [RCS_HW] = {
2587 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2588 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2589 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
4b8e38a9 2590 },
318f89ca
TU
2591 [VCS_HW] = {
2592 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2593 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2594 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
4b8e38a9 2595 },
318f89ca
TU
2596 [BCS_HW] = {
2597 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2598 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2599 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
4b8e38a9 2600 },
318f89ca
TU
2601 [VECS_HW] = {
2602 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2603 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2604 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
4b8e38a9
TU
2605 },
2606 };
2607 u32 wait_mbox;
2608 i915_reg_t mbox_reg;
2609
318f89ca 2610 if (i == engine->hw_id) {
4b8e38a9
TU
2611 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2612 mbox_reg = GEN6_NOSYNC;
2613 } else {
318f89ca
TU
2614 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2615 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
4b8e38a9
TU
2616 }
2617
2618 engine->semaphore.mbox.wait[i] = wait_mbox;
2619 engine->semaphore.mbox.signal[i] = mbox_reg;
2620 }
d9a64610 2621 }
51d545d0
CW
2622
2623 return;
2624
2625err_obj:
2626 i915_gem_object_put(obj);
2627err:
2628 DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
2629 i915.semaphores = 0;
d9a64610
TU
2630}
2631
ed003078
CW
2632static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2633 struct intel_engine_cs *engine)
2634{
c78d6061
TU
2635 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << engine->irq_shift;
2636
ed003078 2637 if (INTEL_GEN(dev_priv) >= 8) {
31bb59cc
CW
2638 engine->irq_enable = gen8_irq_enable;
2639 engine->irq_disable = gen8_irq_disable;
ed003078
CW
2640 engine->irq_seqno_barrier = gen6_seqno_barrier;
2641 } else if (INTEL_GEN(dev_priv) >= 6) {
31bb59cc
CW
2642 engine->irq_enable = gen6_irq_enable;
2643 engine->irq_disable = gen6_irq_disable;
ed003078
CW
2644 engine->irq_seqno_barrier = gen6_seqno_barrier;
2645 } else if (INTEL_GEN(dev_priv) >= 5) {
31bb59cc
CW
2646 engine->irq_enable = gen5_irq_enable;
2647 engine->irq_disable = gen5_irq_disable;
f8973c21 2648 engine->irq_seqno_barrier = gen5_seqno_barrier;
ed003078 2649 } else if (INTEL_GEN(dev_priv) >= 3) {
31bb59cc
CW
2650 engine->irq_enable = i9xx_irq_enable;
2651 engine->irq_disable = i9xx_irq_disable;
ed003078 2652 } else {
31bb59cc
CW
2653 engine->irq_enable = i8xx_irq_enable;
2654 engine->irq_disable = i8xx_irq_disable;
ed003078
CW
2655 }
2656}
2657
06a2fe22
TU
2658static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2659 struct intel_engine_cs *engine)
2660{
618e4ca7
CW
2661 intel_ring_init_irq(dev_priv, engine);
2662 intel_ring_init_semaphores(dev_priv, engine);
2663
1d8a1337 2664 engine->init_hw = init_ring_common;
821ed7df 2665 engine->reset_hw = reset_ring_common;
7445a2a4 2666
ddd66c51 2667 engine->emit_request = i9xx_emit_request;
618e4ca7
CW
2668 if (i915.semaphores)
2669 engine->emit_request = gen6_sema_emit_request;
ddd66c51 2670 engine->submit_request = i9xx_submit_request;
6f7bef75
CW
2671
2672 if (INTEL_GEN(dev_priv) >= 8)
803688ba 2673 engine->emit_bb_start = gen8_emit_bb_start;
6f7bef75 2674 else if (INTEL_GEN(dev_priv) >= 6)
803688ba 2675 engine->emit_bb_start = gen6_emit_bb_start;
6f7bef75 2676 else if (INTEL_GEN(dev_priv) >= 4)
803688ba 2677 engine->emit_bb_start = i965_emit_bb_start;
6f7bef75 2678 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
803688ba 2679 engine->emit_bb_start = i830_emit_bb_start;
6f7bef75 2680 else
803688ba 2681 engine->emit_bb_start = i915_emit_bb_start;
06a2fe22
TU
2682}
2683
8b3e2d36 2684int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
5c1143bb 2685{
8b3e2d36 2686 struct drm_i915_private *dev_priv = engine->i915;
3e78998a 2687 int ret;
5c1143bb 2688
06a2fe22
TU
2689 intel_ring_default_vfuncs(dev_priv, engine);
2690
61ff75ac
CW
2691 if (HAS_L3_DPF(dev_priv))
2692 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
f8973c21 2693
c033666a 2694 if (INTEL_GEN(dev_priv) >= 8) {
e2f80391 2695 engine->init_context = intel_rcs_ctx_init;
ddd66c51 2696 engine->emit_request = gen8_render_emit_request;
c7fe7d25 2697 engine->emit_flush = gen8_render_ring_flush;
39df9190 2698 if (i915.semaphores)
e2f80391 2699 engine->semaphore.signal = gen8_rcs_signal;
c033666a 2700 } else if (INTEL_GEN(dev_priv) >= 6) {
e2f80391 2701 engine->init_context = intel_rcs_ctx_init;
c7fe7d25 2702 engine->emit_flush = gen7_render_ring_flush;
c033666a 2703 if (IS_GEN6(dev_priv))
c7fe7d25 2704 engine->emit_flush = gen6_render_ring_flush;
c033666a 2705 } else if (IS_GEN5(dev_priv)) {
c7fe7d25 2706 engine->emit_flush = gen4_render_ring_flush;
59465b5f 2707 } else {
c033666a 2708 if (INTEL_GEN(dev_priv) < 4)
c7fe7d25 2709 engine->emit_flush = gen2_render_ring_flush;
46f0f8d1 2710 else
c7fe7d25 2711 engine->emit_flush = gen4_render_ring_flush;
e2f80391 2712 engine->irq_enable_mask = I915_USER_INTERRUPT;
1ec14ad3 2713 }
707d9cf9 2714
c033666a 2715 if (IS_HASWELL(dev_priv))
803688ba 2716 engine->emit_bb_start = hsw_emit_bb_start;
6f7bef75 2717
e2f80391
TU
2718 engine->init_hw = init_render_ring;
2719 engine->cleanup = render_ring_cleanup;
59465b5f 2720
acd27845 2721 ret = intel_init_ring_buffer(engine);
99be1dfe
SV
2722 if (ret)
2723 return ret;
2724
f8973c21 2725 if (INTEL_GEN(dev_priv) >= 6) {
56c0f1a7 2726 ret = intel_engine_create_scratch(engine, 4096);
7d5ea807
CW
2727 if (ret)
2728 return ret;
2729 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
56c0f1a7 2730 ret = intel_engine_create_scratch(engine, I830_WA_SIZE);
99be1dfe
SV
2731 if (ret)
2732 return ret;
2733 }
2734
2735 return 0;
5c1143bb
XH
2736}
2737
8b3e2d36 2738int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
5c1143bb 2739{
8b3e2d36 2740 struct drm_i915_private *dev_priv = engine->i915;
58fa3835 2741
06a2fe22
TU
2742 intel_ring_default_vfuncs(dev_priv, engine);
2743
c033666a 2744 if (INTEL_GEN(dev_priv) >= 6) {
0fd2c201 2745 /* gen6 bsd needs a special wa for tail updates */
c033666a 2746 if (IS_GEN6(dev_priv))
c5efa1ad 2747 engine->submit_request = gen6_bsd_submit_request;
c7fe7d25 2748 engine->emit_flush = gen6_bsd_ring_flush;
c78d6061 2749 if (INTEL_GEN(dev_priv) < 8)
e2f80391 2750 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
58fa3835 2751 } else {
e2f80391 2752 engine->mmio_base = BSD_RING_BASE;
c7fe7d25 2753 engine->emit_flush = bsd_ring_flush;
8d228911 2754 if (IS_GEN5(dev_priv))
e2f80391 2755 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
8d228911 2756 else
e2f80391 2757 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
58fa3835 2758 }
58fa3835 2759
acd27845 2760 return intel_init_ring_buffer(engine);
5c1143bb 2761}
549f7365 2762
845f74a7 2763/**
 2764 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
845f74a7 2765 */
8b3e2d36 2766int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
845f74a7 2767{
8b3e2d36 2768 struct drm_i915_private *dev_priv = engine->i915;
06a2fe22
TU
2769
2770 intel_ring_default_vfuncs(dev_priv, engine);
2771
c7fe7d25 2772 engine->emit_flush = gen6_bsd_ring_flush;
845f74a7 2773
acd27845 2774 return intel_init_ring_buffer(engine);
845f74a7
ZY
2775}
2776
8b3e2d36 2777int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
549f7365 2778{
8b3e2d36 2779 struct drm_i915_private *dev_priv = engine->i915;
06a2fe22
TU
2780
2781 intel_ring_default_vfuncs(dev_priv, engine);
2782
c7fe7d25 2783 engine->emit_flush = gen6_ring_flush;
c78d6061 2784 if (INTEL_GEN(dev_priv) < 8)
e2f80391 2785 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
549f7365 2786
acd27845 2787 return intel_init_ring_buffer(engine);
549f7365 2788}
a7b9761d 2789
8b3e2d36 2790int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
9a8a2213 2791{
8b3e2d36 2792 struct drm_i915_private *dev_priv = engine->i915;
06a2fe22
TU
2793
2794 intel_ring_default_vfuncs(dev_priv, engine);
2795
c7fe7d25 2796 engine->emit_flush = gen6_ring_flush;
abd58f01 2797
c78d6061 2798 if (INTEL_GEN(dev_priv) < 8) {
e2f80391 2799 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
31bb59cc
CW
2800 engine->irq_enable = hsw_vebox_irq_enable;
2801 engine->irq_disable = hsw_vebox_irq_disable;
abd58f01 2802 }
9a8a2213 2803
acd27845 2804 return intel_init_ring_buffer(engine);
9a8a2213 2805}