/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x
static const char * const platform_names[] = {
	PLATFORM_NAME(I830),
	PLATFORM_NAME(I845G),
	PLATFORM_NAME(I85X),
	PLATFORM_NAME(I865G),
	PLATFORM_NAME(I915G),
	PLATFORM_NAME(I915GM),
	PLATFORM_NAME(I945G),
	PLATFORM_NAME(I945GM),
	PLATFORM_NAME(G33),
	PLATFORM_NAME(PINEVIEW),
	PLATFORM_NAME(I965G),
	PLATFORM_NAME(I965GM),
	PLATFORM_NAME(G45),
	PLATFORM_NAME(GM45),
	PLATFORM_NAME(IRONLAKE),
	PLATFORM_NAME(SANDYBRIDGE),
	PLATFORM_NAME(IVYBRIDGE),
	PLATFORM_NAME(VALLEYVIEW),
	PLATFORM_NAME(HASWELL),
	PLATFORM_NAME(BROADWELL),
	PLATFORM_NAME(CHERRYVIEW),
	PLATFORM_NAME(SKYLAKE),
	PLATFORM_NAME(BROXTON),
	PLATFORM_NAME(KABYLAKE),
	PLATFORM_NAME(GEMINILAKE),
	PLATFORM_NAME(COFFEELAKE),
	PLATFORM_NAME(CANNONLAKE),
	PLATFORM_NAME(ICELAKE),
};
#undef PLATFORM_NAME

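/**
 * intel_platform_name - map an intel_platform enum value to its name
 * @platform: platform enumerator (e.g. INTEL_SKYLAKE)
 *
 * Returns the matching entry of platform_names[], or "<unknown>" if the
 * value is out of range or has no name registered.
 *
 * A hypothetical caller might do:
 *
 *	DRM_DEBUG_DRIVER("platform: %s\n",
 *			 intel_platform_name(INTEL_INFO(dev_priv)->platform));
 */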
const char *intel_platform_name(enum intel_platform platform)
{
	BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);

	if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
			 platform_names[platform] == NULL))
		return "<unknown>";

	return platform_names[platform];
}

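/*
 * Print every DEV_INFO_FOR_EACH_FLAG feature flag as a "name: yes/no" line
 * through the given drm_printer (e.g. a debugfs seq_file or the kernel log).
 */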
void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

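/*
 * Summarize the slice/subslice/EU (SSEU) configuration: totals, per-slice
 * subslice masks and the power gating capabilities.
 */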
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	int s;

	drm_printf(p, "slice total: %u, mask=%04x\n",
		   hweight8(sseu->slice_mask), sseu->slice_mask);
	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslices, mask=%04x\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);
	}
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

void intel_device_info_dump(const struct intel_device_info *info,
			    struct drm_printer *p)
{
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);

	drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
		   INTEL_DEVID(dev_priv),
		   INTEL_REVID(dev_priv),
		   intel_platform_name(info->platform),
		   info->gen);

	intel_device_info_dump_flags(info, p);
}

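/*
 * Print the full SSEU topology, one line per slice with the enabled EU mask
 * of each subslice indented underneath; prints "Unavailable" when no slices
 * have been probed yet.
 */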
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
				     struct drm_printer *p)
{
	int s, ss;

	if (sseu->max_slices == 0) {
		drm_printf(p, "Unavailable\n");
		return;
	}

	for (s = 0; s < sseu->max_slices; s++) {
		drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n",
			   s, hweight8(sseu->subslice_mask[s]),
			   sseu->subslice_mask[s]);

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u16 enabled_eus = sseu_get_eus(sseu, s, ss);

			drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n",
				   ss, hweight16(enabled_eus), enabled_eus);
		}
	}
}

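/* Total enabled EUs, i.e. the number of bits set across all EU masks. */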
static u16 compute_eu_total(const struct sseu_dev_info *sseu)
{
	u16 i, total = 0;

	for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++)
		total += hweight8(sseu->eu_mask[i]);

	return total;
}

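/*
 * Gen11 (Icelake) exposes its topology directly through fuse registers:
 * slice enable, subslice disable and EU disable are read (and inverted where
 * needed) into the sseu masks. A single slice with up to 8 subslices of
 * 8 EUs each is assumed, matching the max_* values set below.
 */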
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u8 s_en;
	u32 ss_en, ss_en_mask;
	u8 eu_en;
	int s;

	sseu->max_slices = 1;
	sseu->max_subslices = 8;
	sseu->max_eus_per_subslice = 8;

	s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
	ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
	ss_en_mask = BIT(sseu->max_subslices) - 1;
	eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK);

	for (s = 0; s < sseu->max_slices; s++) {
		if (s_en & BIT(s)) {
			int ss_idx = sseu->max_subslices * s;
			int ss;

			sseu->slice_mask |= BIT(s);
			sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask;
			for (ss = 0; ss < sseu->max_subslices; ss++) {
				if (sseu->subslice_mask[s] & BIT(ss))
					sseu_set_eus(sseu, s, ss, eu_en);
			}
		}
	}
	sseu->eu_per_subslice = hweight8(eu_en);
	sseu->eu_total = compute_eu_total(sseu);

	/* ICL has no power gating restrictions. */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

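/*
 * Gen10 (Cannonlake) packs the EU disable bits of consecutive subslices
 * across several 32-bit registers, so the unpacking below walks
 * GEN8_EU_DISABLE0..2 and GEN10_EU_DISABLE3 slice by slice before dropping
 * any subslice whose EUs are all fused off.
 */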
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	const u32 fuse2 = I915_READ(GEN8_FUSE2);
	int s, ss;
	const int eu_mask = 0xff;
	u32 subslice_mask, eu_en;

	sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
			   GEN10_F2_S_ENA_SHIFT;
	sseu->max_slices = 6;
	sseu->max_subslices = 4;
	sseu->max_eus_per_subslice = 8;

	subslice_mask = (1 << 4) - 1;
	subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
			   GEN10_F2_SS_DIS_SHIFT);

	/*
	 * Slice0 can have up to 3 subslices, but there are only 2 in
	 * slice1/2.
	 */
	sseu->subslice_mask[0] = subslice_mask;
	for (s = 1; s < sseu->max_slices; s++)
		sseu->subslice_mask[s] = subslice_mask & 0x3;

	/* Slice0 */
	eu_en = ~I915_READ(GEN8_EU_DISABLE0);
	for (ss = 0; ss < sseu->max_subslices; ss++)
		sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
	/* Slice1 */
	sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE1);
	sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
	/* Slice2 */
	sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
	/* Slice3 */
	sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN8_EU_DISABLE2);
	sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
	/* Slice4 */
	sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
	sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
	/* Slice5 */
	sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
	eu_en = ~I915_READ(GEN10_EU_DISABLE3);
	sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);

	/* Do a second pass where we mark the subslices disabled if all their
	 * eus are off.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			if (sseu_get_eus(sseu, s, ss) == 0)
				sseu->subslice_mask[s] &= ~BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CNL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/* No restrictions on Power Gating */
	sseu->has_slice_pg = 1;
	sseu->has_subslice_pg = 1;
	sseu->has_eu_pg = 1;
}

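/*
 * Cherryview has a single slice with at most two subslices; the per-subslice
 * EU fusing is decoded from the R0/R1 fields of CHV_FUSE_GT.
 */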
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	u32 fuse;

	fuse = I915_READ(CHV_FUSE_GT);

	sseu->slice_mask = BIT(0);
	sseu->max_slices = 1;
	sseu->max_subslices = 2;
	sseu->max_eus_per_subslice = 8;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS0_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(0);
		sseu_set_eus(sseu, 0, 0, ~disabled_mask);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		u8 disabled_mask =
			((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >>
			 CHV_FGT_EU_DIS_SS1_R0_SHIFT) |
			(((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >>
			  CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4);

		sseu->subslice_mask[0] |= BIT(1);
		sseu_set_eus(sseu, 0, 1, ~disabled_mask);
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				sseu->eu_total / sseu_subslice_total(sseu) :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = (sseu->eu_per_subslice > 2);
}

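/*
 * Gen9 (SKL/BXT/KBL/GLK/CFL) reads slice and subslice presence from
 * GEN8_FUSE2 and the per-slice EU fusing from GEN9_EU_DISABLE; the gen9 LP
 * parts (BXT/GLK) additionally derive the pooled-EU configuration from the
 * resulting subslice mask.
 */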
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	int s, ss;
	u32 fuse2, eu_disable, subslice_mask;
	const u8 eu_mask = 0xff;

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;

	/* BXT has a single slice and at most 3 subslices. */
	sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3;
	sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = (1 << sseu->max_subslices) - 1;
	subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >>
			   GEN9_F2_SS_DIS_SHIFT);

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			int eu_per_ss;
			u8 eu_disabled_mask;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask;

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			eu_per_ss = sseu->max_eus_per_subslice -
				hweight8(eu_disabled_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				sseu->subslice_7eu[s] |= BIT(ss);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;
	/*
	 * SKL+ supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT+ supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	sseu->has_slice_pg =
		!IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg =
		IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
	sseu->has_eu_pg = sseu->eu_per_subslice > 2;

	if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss)	(!(sseu->subslice_mask[0] & BIT(ss)))
		info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3;

		sseu->min_eu_in_pool = 0;
		if (info->has_pooled_eu) {
			if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0))
				sseu->min_eu_in_pool = 3;
			else if (IS_SS_DISABLED(1))
				sseu->min_eu_in_pool = 6;
			else
				sseu->min_eu_in_pool = 9;
		}
#undef IS_SS_DISABLED
	}
}

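/*
 * Broadwell is similar to gen9, but the EU disable bits of the three slices
 * straddle register boundaries, hence the shifting when filling eu_disable[]
 * below.
 */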
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
	int s, ss;
	u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */

	fuse2 = I915_READ(GEN8_FUSE2);
	sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	sseu->max_slices = 3;
	sseu->max_subslices = 3;
	sseu->max_eus_per_subslice = 8;

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	subslice_mask = GENMASK(sseu->max_subslices - 1, 0);
	subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
			   GEN8_F2_SS_DIS_SHIFT);

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < sseu->max_slices; s++) {
		if (!(sseu->slice_mask & BIT(s)))
			/* skip disabled slice */
			continue;

		sseu->subslice_mask[s] = subslice_mask;

		for (ss = 0; ss < sseu->max_subslices; ss++) {
			u8 eu_disabled_mask;
			u32 n_disabled;

			if (!(sseu->subslice_mask[s] & BIT(ss)))
				/* skip disabled subslice */
				continue;

			eu_disabled_mask =
				eu_disable[s] >> (ss * sseu->max_eus_per_subslice);

			sseu_set_eus(sseu, s, ss, ~eu_disabled_mask);

			n_disabled = hweight8(eu_disabled_mask);

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (sseu->max_eus_per_subslice - n_disabled == 7)
				sseu->subslice_7eu[s] |= 1 << ss;
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	sseu->eu_per_subslice = sseu_subslice_total(sseu) ?
				DIV_ROUND_UP(sseu->eu_total,
					     sseu_subslice_total(sseu)) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct sseu_dev_info *sseu = &info->sseu;
	u32 fuse1;
	int s, ss;

	/*
	 * There isn't a register to tell us how many slices/subslices. We
	 * work off the PCI-ids here.
	 */
	switch (info->gt) {
	default:
		MISSING_CASE(info->gt);
		/* fall through */
	case 1:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0);
		break;
	case 2:
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		break;
	case 3:
		sseu->slice_mask = BIT(0) | BIT(1);
		sseu->subslice_mask[0] = BIT(0) | BIT(1);
		sseu->subslice_mask[1] = BIT(0) | BIT(1);
		break;
	}

	sseu->max_slices = hweight8(sseu->slice_mask);
	sseu->max_subslices = hweight8(sseu->subslice_mask[0]);

	fuse1 = I915_READ(HSW_PAVP_FUSE1);
	switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
	default:
		MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
			     HSW_F1_EU_DIS_SHIFT);
		/* fall through */
	case HSW_F1_EU_DIS_10EUS:
		sseu->eu_per_subslice = 10;
		break;
	case HSW_F1_EU_DIS_8EUS:
		sseu->eu_per_subslice = 8;
		break;
	case HSW_F1_EU_DIS_6EUS:
		sseu->eu_per_subslice = 6;
		break;
	}
	sseu->max_eus_per_subslice = sseu->eu_per_subslice;

	for (s = 0; s < sseu->max_slices; s++) {
		for (ss = 0; ss < sseu->max_subslices; ss++) {
			sseu_set_eus(sseu, s, ss,
				     (1UL << sseu->eu_per_subslice) - 1);
		}
	}

	sseu->eu_total = compute_eu_total(sseu);

	/* No powergating for you. */
	sseu->has_slice_pg = 0;
	sseu->has_subslice_pg = 0;
	sseu->has_eu_pg = 0;
}

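/*
 * Decode TIMESTAMP_OVERRIDE into a reference frequency in kHz: the divider
 * field selects the integer MHz part and the denominator field the
 * fractional part; the sum is returned in kHz.
 */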
static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv)
{
	u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE);
	u32 base_freq, frac_freq;

	base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
	base_freq *= 1000;

	frac_freq = ((ts_override &
		      GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
		     GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
	frac_freq = 1000 / (frac_freq + 1);

	return base_freq + frac_freq;
}

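/*
 * The two helpers below translate the crystal clock frequency field of
 * RPM_CONFIG0 into kHz; gen10 only distinguishes 19.2 and 24 MHz, while
 * gen11 adds 25 and 38.4 MHz encodings.
 */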
static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 crystal_clock = (rpm_config_reg &
			     GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv,
					u32 rpm_config_reg)
{
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;
	u32 f25_mhz = 25000;
	u32 f38_4_mhz = 38400;
	u32 crystal_clock = (rpm_config_reg &
			     GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
			    GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (crystal_clock) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return f24_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return f19_2_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return f38_4_mhz;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return f25_mhz;
	default:
		MISSING_CASE(crystal_clock);
		return 0;
	}
}

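/*
 * Work out how fast the command streamer timestamp counter ticks, in kHz:
 * gen4 and earlier derive it from the raw clock, gen5-8 use a fixed 12.5 MHz
 * rate, and gen9+ compute it from the reference/crystal clock plus a divider
 * shift taken from CTC_MODE or RPM_CONFIG0.
 */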
static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
{
	u32 f12_5_mhz = 12500;
	u32 f19_2_mhz = 19200;
	u32 f24_mhz = 24000;

	if (INTEL_GEN(dev_priv) <= 4) {
		/* PRMs say:
		 *
		 *     "The value in this register increments once every 16
		 *      hclks." (through the “Clocking Configuration”
		 *      (“CLKCFG”) MCHBAR register)
		 */
		return dev_priv->rawclk_freq / 16;
	} else if (INTEL_GEN(dev_priv) <= 8) {
		/* PRMs say:
		 *
		 *     "The PCU TSC counts 10ns increments; this timestamp
		 *      reflects bits 38:3 of the TSC (i.e. 80ns granularity,
		 *      rolling over every 1.5 hours).
		 */
		return f12_5_mhz;
	} else if (INTEL_GEN(dev_priv) <= 9) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz;

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
				      CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	} else if (INTEL_GEN(dev_priv) <= 11) {
		u32 ctc_reg = I915_READ(CTC_MODE);
		u32 freq = 0;

		/* First figure out the reference frequency. There are 2 ways
		 * we can compute the frequency, either through the
		 * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
		 * tells us which one we should use.
		 */
		if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
			freq = read_reference_ts_freq(dev_priv);
		} else {
			u32 rpm_config_reg = I915_READ(RPM_CONFIG0);

			if (INTEL_GEN(dev_priv) <= 10)
				freq = gen10_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);
			else
				freq = gen11_get_crystal_clock_freq(dev_priv,
								rpm_config_reg);

			/* Now figure out how the command stream's timestamp
			 * register increments from this frequency (it might
			 * increment only every few clock cycle).
			 */
			freq >>= 3 - ((rpm_config_reg &
				       GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
				      GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
		}

		return freq;
	}

	MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
	return 0;
}

/**
 * intel_device_info_runtime_init - initialize runtime info
 * @info: intel device info struct
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct intel_device_info *info)
{
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {
		for_each_pipe(dev_priv, pipe)
			info->num_scalers[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) == 9) {
		info->num_scalers[PIPE_A] = 2;
		info->num_scalers[PIPE_B] = 2;
		info->num_scalers[PIPE_C] = 1;
	}

	BUILD_BUG_ON(I915_NUM_ENGINES >
		     sizeof(intel_ring_mask_t) * BITS_PER_BYTE);

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 3;
	else if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;
	}

	if (i915_modparams.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (HAS_PCH_CPT(dev_priv) &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_HASWELL(dev_priv))
		haswell_sseu_info_init(dev_priv);
	else if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) == 9)
		gen9_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) == 10)
		gen10_sseu_info_init(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 11)
		gen11_sseu_info_init(dev_priv);

	/* Initialize command stream timestamp frequency */
	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}

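/* Dump the driver (as opposed to hardware) capabilities for debug output. */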
void intel_driver_caps_print(const struct intel_driver_caps *caps,
			     struct drm_printer *p)
{
	drm_printf(p, "Has logical contexts? %s\n",
		   yesno(caps->has_logical_contexts));
	drm_printf(p, "scheduler: %x\n", caps->scheduler);
}

/*
 * Determine which engines are fused off in our particular hardware. Since the
 * fuse register is in the blitter powerwell, we need forcewake to be ready at
 * this point (but later we need to prune the forcewake domains for engines that
 * are indeed fused off).
 */
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	u8 vdbox_disable, vebox_disable;
	u32 media_fuse;
	int i;

	if (INTEL_GEN(dev_priv) < 11)
		return;

	media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);

	vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
			GEN11_GT_VEBOX_DISABLE_SHIFT;

	DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(dev_priv, _VCS(i)))
			continue;

		if (!(BIT(i) & vdbox_disable))
			continue;

		info->ring_mask &= ~ENGINE_MASK(_VCS(i));
		DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
	}

	DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(dev_priv, _VECS(i)))
			continue;

		if (!(BIT(i) & vebox_disable))
			continue;

		info->ring_mask &= ~ENGINE_MASK(_VECS(i));
		DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
	}
}