/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */
8 #include "intel_workarounds.h"
/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   everytime a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of a MMMIO workaround (as we
 *   write the list of these to/be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
 */
51 static int wa_add(struct drm_i915_private *dev_priv,
53 const u32 mask, const u32 val)
55 const unsigned int idx = dev_priv->workarounds.count;
57 if (WARN_ON(idx >= I915_MAX_WA_REGS))
60 dev_priv->workarounds.reg[idx].addr = addr;
61 dev_priv->workarounds.reg[idx].value = val;
62 dev_priv->workarounds.reg[idx].mask = mask;
64 dev_priv->workarounds.count++;
69 #define WA_REG(addr, mask, val) do { \
70 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
75 #define WA_SET_BIT_MASKED(addr, mask) \
76 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
78 #define WA_CLR_BIT_MASKED(addr, mask) \
79 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
81 #define WA_SET_FIELD_MASKED(addr, mask, value) \
82 WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
84 static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
86 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
88 /* WaDisableAsyncFlipPerfMode:bdw,chv */
89 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
91 /* WaDisablePartialInstShootdown:bdw,chv */
92 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
93 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
95 /* Use Force Non-Coherent whenever executing a 3D context. This is a
96 * workaround for for a possible hang in the unlikely event a TLB
97 * invalidation occurs during a PSD flush.
99 /* WaForceEnableNonCoherent:bdw,chv */
100 /* WaHdcDisableFetchWhenMasked:bdw,chv */
101 WA_SET_BIT_MASKED(HDC_CHICKEN0,
102 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
103 HDC_FORCE_NON_COHERENT);
105 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
106 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
107 * polygons in the same 8x4 pixel/sample area to be processed without
108 * stalling waiting for the earlier ones to write to Hierarchical Z
111 * This optimization is off by default for BDW and CHV; turn it on.
113 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
115 /* Wa4x4STCOptimizationDisable:bdw,chv */
116 WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
119 * BSpec recommends 8x4 when MSAA is used,
120 * however in practice 16x4 seems fastest.
122 * Note that PS/WM thread counts depend on the WIZ hashing
123 * disable bit, which we don't touch here, but it's good
124 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
126 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
127 GEN6_WIZ_HASHING_MASK,
128 GEN6_WIZ_HASHING_16x4);
133 static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
137 ret = gen8_ctx_workarounds_init(dev_priv);
141 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
142 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
144 /* WaDisableDopClockGating:bdw
146 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
147 * to disable EUTC clock gating.
149 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
150 DOP_CLOCK_GATING_DISABLE);
152 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
153 GEN8_SAMPLER_POWER_BYPASS_DIS);
155 WA_SET_BIT_MASKED(HDC_CHICKEN0,
156 /* WaForceContextSaveRestoreNonCoherent:bdw */
157 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
158 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
159 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
164 static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
168 ret = gen8_ctx_workarounds_init(dev_priv);
172 /* WaDisableThreadStallDopClockGating:chv */
173 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
175 /* Improve HiZ throughput on CHV. */
176 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
181 static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
183 if (HAS_LLC(dev_priv)) {
184 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
186 * Must match Display Engine. See
187 * WaCompressedResourceDisplayNewHashMode.
189 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
190 GEN9_PBE_COMPRESSED_HASH_SELECTION);
191 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
192 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
195 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
196 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
197 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
198 FLOW_CONTROL_ENABLE |
199 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
201 /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
202 if (!IS_COFFEELAKE(dev_priv))
203 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
204 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
206 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
207 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
208 WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
209 GEN9_ENABLE_YV12_BUGFIX |
210 GEN9_ENABLE_GPGPU_PREEMPTION);
212 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
213 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
214 WA_SET_BIT_MASKED(CACHE_MODE_1,
215 GEN8_4x4_STC_OPTIMIZATION_DISABLE |
216 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
218 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
219 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
220 GEN9_CCS_TLB_PREFETCH_ENABLE);
222 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
223 WA_SET_BIT_MASKED(HDC_CHICKEN0,
224 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
225 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
227 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
228 * both tied to WaForceContextSaveRestoreNonCoherent
229 * in some hsds for skl. We keep the tie for all gen9. The
230 * documentation is a bit hazy and so we want to get common behaviour,
231 * even though there is no clear evidence we would need both on kbl/bxt.
232 * This area has been source of system hangs so we play it safe
233 * and mimic the skl regardless of what bspec says.
235 * Use Force Non-Coherent whenever executing a 3D context. This
236 * is a workaround for a possible hang in the unlikely event
237 * a TLB invalidation occurs during a PSD flush.
240 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
241 WA_SET_BIT_MASKED(HDC_CHICKEN0,
242 HDC_FORCE_NON_COHERENT);
244 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
245 if (IS_SKYLAKE(dev_priv) ||
246 IS_KABYLAKE(dev_priv) ||
247 IS_COFFEELAKE(dev_priv))
248 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
249 GEN8_SAMPLER_POWER_BYPASS_DIS);
251 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
252 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
255 * Supporting preemption with fine-granularity requires changes in the
256 * batch buffer programming. Since we can't break old userspace, we
257 * need to set our default preemption level to safe value. Userspace is
258 * still able to use more fine-grained preemption levels, since in
259 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
260 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
261 * not real HW workarounds, but merely a way to start using preemption
262 * while maintaining old contract with userspace.
265 /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
266 WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
268 /* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
269 WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
270 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
271 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
273 /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
274 if (IS_GEN9_LP(dev_priv))
275 WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
280 static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
282 u8 vals[3] = { 0, 0, 0 };
285 for (i = 0; i < 3; i++) {
289 * Only consider slices where one, and only one, subslice has 7
292 if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
296 * subslice_7eu[i] != 0 (because of the check above) and
297 * ss_max == 4 (maximum number of subslices possible per slice)
301 ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
305 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
308 /* Tune IZ hashing. See intel_device_info_runtime_init() */
309 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
310 GEN9_IZ_HASHING_MASK(2) |
311 GEN9_IZ_HASHING_MASK(1) |
312 GEN9_IZ_HASHING_MASK(0),
313 GEN9_IZ_HASHING(2, vals[2]) |
314 GEN9_IZ_HASHING(1, vals[1]) |
315 GEN9_IZ_HASHING(0, vals[0]));
320 static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
324 ret = gen9_ctx_workarounds_init(dev_priv);
328 return skl_tune_iz_hashing(dev_priv);
331 static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
335 ret = gen9_ctx_workarounds_init(dev_priv);
339 /* WaDisableThreadStallDopClockGating:bxt */
340 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
341 STALL_DOP_GATING_DISABLE);
343 /* WaToEnableHwFixForPushConstHWBug:bxt */
344 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
345 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
350 static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
354 ret = gen9_ctx_workarounds_init(dev_priv);
358 /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
359 if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
360 WA_SET_BIT_MASKED(HDC_CHICKEN0,
361 HDC_FENCE_DEST_SLM_DISABLE);
363 /* WaToEnableHwFixForPushConstHWBug:kbl */
364 if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
365 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
366 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
368 /* WaDisableSbeCacheDispatchPortSharing:kbl */
369 WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
370 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
375 static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
379 ret = gen9_ctx_workarounds_init(dev_priv);
383 /* WaToEnableHwFixForPushConstHWBug:glk */
384 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
385 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
390 static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
394 ret = gen9_ctx_workarounds_init(dev_priv);
398 /* WaToEnableHwFixForPushConstHWBug:cfl */
399 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
400 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
402 /* WaDisableSbeCacheDispatchPortSharing:cfl */
403 WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
404 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
409 static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
411 /* WaForceContextSaveRestoreNonCoherent:cnl */
412 WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
413 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
415 /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
416 if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
417 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
419 /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
420 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
421 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
423 /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
424 if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
425 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
426 GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
428 /* WaPushConstantDereferenceHoldDisable:cnl */
429 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
431 /* FtrEnableFastAnisoL1BankingFix:cnl */
432 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
434 /* WaDisable3DMidCmdPreemption:cnl */
435 WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
437 /* WaDisableGPGPUMidCmdPreemption:cnl */
438 WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
439 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
440 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
442 /* WaDisableEarlyEOT:cnl */
443 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
448 static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
450 /* Wa_1604370585:icl (pre-prod)
451 * Formerly known as WaPushConstantDereferenceHoldDisable
453 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
454 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
455 PUSH_CONSTANT_DEREF_DISABLE);
457 /* WaForceEnableNonCoherent:icl
458 * This is not the same workaround as in early Gen9 platforms, where
459 * lacking this could cause system hangs, but coherency performance
460 * overhead is high and only a few compute workloads really need it
461 * (the register is whitelisted in hardware now, so UMDs can opt in
462 * for coherency if they have a good reason).
464 WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
469 int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
473 dev_priv->workarounds.count = 0;
475 if (INTEL_GEN(dev_priv) < 8)
477 else if (IS_BROADWELL(dev_priv))
478 err = bdw_ctx_workarounds_init(dev_priv);
479 else if (IS_CHERRYVIEW(dev_priv))
480 err = chv_ctx_workarounds_init(dev_priv);
481 else if (IS_SKYLAKE(dev_priv))
482 err = skl_ctx_workarounds_init(dev_priv);
483 else if (IS_BROXTON(dev_priv))
484 err = bxt_ctx_workarounds_init(dev_priv);
485 else if (IS_KABYLAKE(dev_priv))
486 err = kbl_ctx_workarounds_init(dev_priv);
487 else if (IS_GEMINILAKE(dev_priv))
488 err = glk_ctx_workarounds_init(dev_priv);
489 else if (IS_COFFEELAKE(dev_priv))
490 err = cfl_ctx_workarounds_init(dev_priv);
491 else if (IS_CANNONLAKE(dev_priv))
492 err = cnl_ctx_workarounds_init(dev_priv);
493 else if (IS_ICELAKE(dev_priv))
494 err = icl_ctx_workarounds_init(dev_priv);
496 MISSING_CASE(INTEL_GEN(dev_priv));
500 DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
501 dev_priv->workarounds.count);
505 int intel_ctx_workarounds_emit(struct i915_request *rq)
507 struct i915_workarounds *w = &rq->i915->workarounds;
514 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
518 cs = intel_ring_begin(rq, (w->count * 2 + 2));
522 *cs++ = MI_LOAD_REGISTER_IMM(w->count);
523 for (i = 0; i < w->count; i++) {
524 *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
525 *cs++ = w->reg[i].value;
529 intel_ring_advance(rq, cs);
531 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
538 static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
542 static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
546 static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
548 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
549 I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
550 _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
552 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
553 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
554 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
556 /* WaDisableKillLogic:bxt,skl,kbl */
557 if (!IS_COFFEELAKE(dev_priv))
558 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
561 if (HAS_LLC(dev_priv)) {
562 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
564 * Must match Display Engine. See
565 * WaCompressedResourceDisplayNewHashMode.
567 I915_WRITE(MMCD_MISC_CTRL,
568 I915_READ(MMCD_MISC_CTRL) |
573 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
574 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
575 BDW_DISABLE_HDC_INVALIDATION);
577 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
578 if (IS_GEN9_LP(dev_priv)) {
579 u32 val = I915_READ(GEN8_L3SQCREG1);
581 val &= ~L3_PRIO_CREDITS_MASK;
582 val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
583 I915_WRITE(GEN8_L3SQCREG1, val);
586 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
587 I915_WRITE(GEN8_L3SQCREG4,
588 I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
590 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
591 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
592 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
595 static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
597 gen9_gt_workarounds_apply(dev_priv);
599 /* WaEnableGapsTsvCreditFix:skl */
600 I915_WRITE(GEN8_GARBCNTL,
601 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
603 /* WaDisableGafsUnitClkGating:skl */
604 I915_WRITE(GEN7_UCGCTL4,
605 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
607 /* WaInPlaceDecompressionHang:skl */
608 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
609 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
610 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
611 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
614 static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
616 gen9_gt_workarounds_apply(dev_priv);
618 /* WaDisablePooledEuLoadBalancingFix:bxt */
619 I915_WRITE(FF_SLICE_CS_CHICKEN2,
620 _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
622 /* WaInPlaceDecompressionHang:bxt */
623 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
624 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
625 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
628 static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
630 gen9_gt_workarounds_apply(dev_priv);
632 /* WaEnableGapsTsvCreditFix:kbl */
633 I915_WRITE(GEN8_GARBCNTL,
634 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
636 /* WaDisableDynamicCreditSharing:kbl */
637 if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
638 I915_WRITE(GAMT_CHKN_BIT_REG,
639 I915_READ(GAMT_CHKN_BIT_REG) |
640 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
642 /* WaDisableGafsUnitClkGating:kbl */
643 I915_WRITE(GEN7_UCGCTL4,
644 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
646 /* WaInPlaceDecompressionHang:kbl */
647 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
648 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
649 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
652 static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
654 gen9_gt_workarounds_apply(dev_priv);
657 static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
659 gen9_gt_workarounds_apply(dev_priv);
661 /* WaEnableGapsTsvCreditFix:cfl */
662 I915_WRITE(GEN8_GARBCNTL,
663 I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
665 /* WaDisableGafsUnitClkGating:cfl */
666 I915_WRITE(GEN7_UCGCTL4,
667 I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
669 /* WaInPlaceDecompressionHang:cfl */
670 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
671 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
672 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
675 static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
677 /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
678 if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
679 I915_WRITE(GAMT_CHKN_BIT_REG,
680 I915_READ(GAMT_CHKN_BIT_REG) |
681 GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
683 /* WaInPlaceDecompressionHang:cnl */
684 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
685 I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
686 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
688 /* WaEnablePreemptionGranularityControlByUMD:cnl */
689 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
690 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
693 static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
695 /* This is not an Wa. Enable for better image quality */
696 I915_WRITE(_3D_CHICKEN3,
697 _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
699 /* WaInPlaceDecompressionHang:icl */
700 I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
701 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
703 /* WaPipelineFlushCoherentLines:icl */
704 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
705 GEN8_LQSC_FLUSH_COHERENT_LINES);
708 * Formerly known as WaGAPZPriorityScheme
710 I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
711 GEN11_ARBITRATION_PRIO_ORDER_MASK);
714 * Formerly known as WaL3BankAddressHashing
716 I915_WRITE(GEN8_GARBCNTL,
717 (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
718 GEN11_HASH_CTRL_EXCL_BIT0);
719 I915_WRITE(GEN11_GLBLINVL,
720 (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
721 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
723 /* WaModifyGamTlbPartitioning:icl */
724 I915_WRITE(GEN11_GACB_PERF_CTRL,
725 (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
726 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
729 * Formerly known as WaDisableCleanEvicts
731 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
732 GEN11_LQSC_CLEAN_EVICT_DISABLE);
735 * Formerly known as WaCL2SFHalfMaxAlloc
737 I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
738 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
739 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
742 * Formerly known as WaDisCtxReload
744 I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
745 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
747 /* Wa_1405779004:icl (pre-prod) */
748 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
749 I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
750 I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
751 MSCUNIT_CLKGATE_DIS);
753 /* Wa_1406680159:icl */
754 I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
755 I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
758 /* Wa_1604302699:icl */
759 I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
760 I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
761 GEN11_I2M_WRITE_DISABLE);
763 /* Wa_1406838659:icl (pre-prod) */
764 if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
765 I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
766 I915_READ(INF_UNIT_LEVEL_CLKGATE) |
769 /* WaForwardProgressSoftReset:icl */
770 I915_WRITE(GEN10_SCRATCH_LNCF2,
771 I915_READ(GEN10_SCRATCH_LNCF2) |
772 PMFLUSHDONE_LNICRSDROP |
773 PMFLUSH_GAPL3UNBLOCK |
777 void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
779 if (INTEL_GEN(dev_priv) < 8)
781 else if (IS_BROADWELL(dev_priv))
782 bdw_gt_workarounds_apply(dev_priv);
783 else if (IS_CHERRYVIEW(dev_priv))
784 chv_gt_workarounds_apply(dev_priv);
785 else if (IS_SKYLAKE(dev_priv))
786 skl_gt_workarounds_apply(dev_priv);
787 else if (IS_BROXTON(dev_priv))
788 bxt_gt_workarounds_apply(dev_priv);
789 else if (IS_KABYLAKE(dev_priv))
790 kbl_gt_workarounds_apply(dev_priv);
791 else if (IS_GEMINILAKE(dev_priv))
792 glk_gt_workarounds_apply(dev_priv);
793 else if (IS_COFFEELAKE(dev_priv))
794 cfl_gt_workarounds_apply(dev_priv);
795 else if (IS_CANNONLAKE(dev_priv))
796 cnl_gt_workarounds_apply(dev_priv);
797 else if (IS_ICELAKE(dev_priv))
798 icl_gt_workarounds_apply(dev_priv);
800 MISSING_CASE(INTEL_GEN(dev_priv));
804 i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
809 static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
811 if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
814 w->reg[w->count++] = reg;
817 static void bdw_whitelist_build(struct whitelist *w)
821 static void chv_whitelist_build(struct whitelist *w)
825 static void gen9_whitelist_build(struct whitelist *w)
827 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
828 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
830 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
831 whitelist_reg(w, GEN8_CS_CHICKEN1);
833 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
834 whitelist_reg(w, GEN8_HDC_CHICKEN1);
837 static void skl_whitelist_build(struct whitelist *w)
839 gen9_whitelist_build(w);
841 /* WaDisableLSQCROPERFforOCL:skl */
842 whitelist_reg(w, GEN8_L3SQCREG4);
845 static void bxt_whitelist_build(struct whitelist *w)
847 gen9_whitelist_build(w);
850 static void kbl_whitelist_build(struct whitelist *w)
852 gen9_whitelist_build(w);
854 /* WaDisableLSQCROPERFforOCL:kbl */
855 whitelist_reg(w, GEN8_L3SQCREG4);
858 static void glk_whitelist_build(struct whitelist *w)
860 gen9_whitelist_build(w);
862 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
863 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
866 static void cfl_whitelist_build(struct whitelist *w)
868 gen9_whitelist_build(w);
871 static void cnl_whitelist_build(struct whitelist *w)
873 /* WaEnablePreemptionGranularityControlByUMD:cnl */
874 whitelist_reg(w, GEN8_CS_CHICKEN1);
877 static void icl_whitelist_build(struct whitelist *w)
881 static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
884 struct drm_i915_private *i915 = engine->i915;
886 GEM_BUG_ON(engine->id != RCS);
889 w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
891 if (INTEL_GEN(i915) < 8)
893 else if (IS_BROADWELL(i915))
894 bdw_whitelist_build(w);
895 else if (IS_CHERRYVIEW(i915))
896 chv_whitelist_build(w);
897 else if (IS_SKYLAKE(i915))
898 skl_whitelist_build(w);
899 else if (IS_BROXTON(i915))
900 bxt_whitelist_build(w);
901 else if (IS_KABYLAKE(i915))
902 kbl_whitelist_build(w);
903 else if (IS_GEMINILAKE(i915))
904 glk_whitelist_build(w);
905 else if (IS_COFFEELAKE(i915))
906 cfl_whitelist_build(w);
907 else if (IS_CANNONLAKE(i915))
908 cnl_whitelist_build(w);
909 else if (IS_ICELAKE(i915))
910 icl_whitelist_build(w);
912 MISSING_CASE(INTEL_GEN(i915));
917 static void whitelist_apply(struct intel_engine_cs *engine,
918 const struct whitelist *w)
920 struct drm_i915_private *dev_priv = engine->i915;
921 const u32 base = engine->mmio_base;
927 intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
929 for (i = 0; i < w->count; i++)
930 I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
931 i915_mmio_reg_offset(w->reg[i]));
933 /* And clear the rest just in case of garbage */
934 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
935 I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
937 intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
940 void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
944 whitelist_apply(engine, whitelist_build(engine, &w));
947 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
948 #include "selftests/intel_workarounds.c"