/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include "i915_gem_request.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20160725"

#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__);

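/*
 * Usage sketch (editor's illustration, not part of the original header):
 * MISSING_CASE() is intended for the default: arm of a switch that ought
 * to be exhaustive, e.g.:
 *
 *	switch (pipe) {
 *	case PIPE_A:
 *	case PIPE_B:
 *	case PIPE_C:
 *		break;
 *	default:
 *		MISSING_CASE(pipe);
 *	}
 */
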
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915.verbose_state_checks, format))		\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

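/*
 * Usage sketch (editor's illustration, assuming the load-failure counter
 * is driven by a modparam as set up in i915_params): checkpoints along
 * the driver load path bail out when the n-th call is reached, so CI can
 * exercise each unwind/error path:
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 */
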
static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

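/*
 * Usage sketch (editor's illustration): yesno()/onoff() keep debug
 * output uniform without repeating ternaries at every call site:
 *
 *	DRM_DEBUG_KMS("PSR: %s, link standby: %s\n",
 *		      yesno(enabled), onoff(link_standby));
 */
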
enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

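/*
 * Worked example (editor's illustration): pipe_name() relies on PIPE_A,
 * PIPE_B and PIPE_C being consecutive, so pipe_name(PIPE_B) == 'B':
 *
 *	DRM_DEBUG_KMS("vblank on pipe %c\n", pipe_name(pipe));
 */
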
enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

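/*
 * Usage sketch (editor's illustration): DSI transcoders are programmed
 * quite differently from transcoders A-C and eDP, so callers typically
 * branch on transcoder_is_dsi() and use transcoder_name() for logging:
 *
 *	if (!transcoder_is_dsi(cpu_transcoder))
 *		DRM_DEBUG_KMS("enabling transcoder %s\n",
 *			      transcoder_name(cpu_transcoder));
 */
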
/*
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC.  Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

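/*
 * Worked example (editor's illustration): the macros above lean on the
 * enum mirroring the pipe/transcoder ordering, so
 * POWER_DOMAIN_PIPE(PIPE_B) == POWER_DOMAIN_PIPE_B.  A typical caller:
 *
 *	intel_display_power_get(dev_priv,
 *				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
 */
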
enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,		/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

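/*
 * Usage sketch (editor's illustration): the iterator starts at
 * HPD_NONE + 1, which also skips the HPD_TV alias of HPD_NONE, e.g. to
 * reset the per-pin stats kept in struct i915_hotplug below (assuming
 * the hotplug member of drm_i915_private):
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */
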
struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP work could block the workqueue on acquiring a mode config
	 * mutex that userspace may have taken, while userspace is waiting
	 * on the DP workqueue to run, which is blocked behind the non-DP
	 * one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

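/*
 * Usage sketch (editor's illustration): the *_masked variants nest a
 * for_each_if() filter inside the plain loop, so only bits set in the
 * mask are visited:
 *
 *	enum pipe pipe;
 *
 *	for_each_pipe_masked(dev_priv, pipe, mask)
 *		intel_handle_pipe(dev_priv, pipe);
 *
 * where intel_handle_pipe() stands in for any per-pipe operation.
 */
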
#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector(dev, intel_connector) \
	list_for_each_entry(intel_connector, \
			    &(dev)->mode_config.connector_list, \
			    base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		for_each_if ((1 << (domain)) & (mask))

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;

	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	unsigned int bsd_engine;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

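/*
 * Worked example (editor's illustration): the data M/N ratio is pixel
 * bandwidth over link bandwidth, roughly
 * M/N = (bpp * pixel_clock) / (nlanes * link_clock * 8), e.g.
 * (24 * 148500) / (4 * 270000 * 8) = 0.4125 for 1080p at 148.5 MHz over
 * a 4-lane 2.7 GHz DP link.
 */
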
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	bool simulated;
	int iommu;
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool waiting;
		int num_waiters;
		int hangcheck_score;
		enum intel_engine_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 last_seqno;
		u32 semaphore_seqno[I915_NUM_ENGINES - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];

		struct drm_i915_error_object {
			int page_count;
			u64 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_object *wa_ctx;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
			pid_t pid;
			u32 seqno;
		} *waiters;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} engine[I915_NUM_ENGINES];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		u64 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 engine:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct drm_atomic_state *state);
	void (*update_wm)(struct drm_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   const struct drm_display_mode *adjusted_mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct drm_i915_gem_request *req,
			  uint32_t flags);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
	void (*load_luts)(struct drm_crtc_state *crtc_state);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned wake_count;
		struct hrtimer timer;
		i915_reg_t reg_set;
		u32 val_set;
		u32 val_clear;
		i915_reg_t reg_ack;
		i915_reg_t reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
	for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
	     (domain__)++) \
		for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
	for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)

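/*
 * Usage sketch (editor's illustration): uncore code wakes the wells it
 * needs and then walks them with these iterators, e.g. bumping the
 * per-domain reference count:
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *
 *	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
 *		domain->wake_count++;
 */
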
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

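/*
 * Worked example (editor's illustration): CSR_VERSION(1, 23) packs the
 * major into the high 16 bits, giving 0x10017, and the accessors undo
 * it: CSR_VERSION_MAJOR(0x10017) == 1, CSR_VERSION_MINOR(0x10017) == 23.
 */
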
struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	uint32_t *dmc_payload;
	uint32_t dmc_fw_size;
	uint32_t version;
	uint32_t mmio_count;
	i915_reg_t mmioaddr[8];
	uint32_t mmiodata[8];
	uint32_t dc_state;
	uint32_t allowed_dc_mask;
};

754 | #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ |
755 | func(is_mobile) sep \ | |
756 | func(is_i85x) sep \ | |
757 | func(is_i915g) sep \ | |
758 | func(is_i945gm) sep \ | |
759 | func(is_g33) sep \ | |
760 | func(need_gfx_hws) sep \ | |
761 | func(is_g4x) sep \ | |
762 | func(is_pineview) sep \ | |
763 | func(is_broadwater) sep \ | |
764 | func(is_crestline) sep \ | |
765 | func(is_ivybridge) sep \ | |
766 | func(is_valleyview) sep \ | |
666a4537 | 767 | func(is_cherryview) sep \ |
79fc46df | 768 | func(is_haswell) sep \ |
ab0d24ac | 769 | func(is_broadwell) sep \ |
7201c0b3 | 770 | func(is_skylake) sep \ |
7526ac19 | 771 | func(is_broxton) sep \ |
ef11bdb3 | 772 | func(is_kabylake) sep \ |
b833d685 | 773 | func(is_preliminary) sep \ |
79fc46df DL |
774 | func(has_fbc) sep \ |
775 | func(has_pipe_cxsr) sep \ | |
776 | func(has_hotplug) sep \ | |
777 | func(cursor_needs_physical) sep \ | |
778 | func(has_overlay) sep \ | |
779 | func(overlay_needs_physical) sep \ | |
780 | func(supports_tv) sep \ | |
dd93be58 | 781 | func(has_llc) sep \ |
ca377809 | 782 | func(has_snoop) sep \ |
30568c45 | 783 | func(has_ddi) sep \ |
33e141ed | 784 | func(has_fpga_dbg) sep \ |
785 | func(has_pooled_eu) | |
c96ea64e | 786 | |
a587f779 DL |
787 | #define DEFINE_FLAG(name) u8 name:1 |
788 | #define SEP_SEMICOLON ; | |
c96ea64e | 789 | |
cfdf1fa2 | 790 | struct intel_device_info { |
10fce67a | 791 | u32 display_mmio_offset; |
87f1f465 | 792 | u16 device_id; |
ac208a8b | 793 | u8 num_pipes; |
d615a166 | 794 | u8 num_sprites[I915_MAX_PIPES]; |
c96c3a8c | 795 | u8 gen; |
ae5702d2 | 796 | u16 gen_mask; |
73ae478c | 797 | u8 ring_mask; /* Rings supported by the HW */ |
a587f779 | 798 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); |
a57c774a AK |
799 | /* Register offsets for the various display pipes and transcoders */ |
800 | int pipe_offsets[I915_MAX_TRANSCODERS]; | |
801 | int trans_offsets[I915_MAX_TRANSCODERS]; | |
a57c774a | 802 | int palette_offsets[I915_MAX_PIPES]; |
5efb3e28 | 803 | int cursor_offsets[I915_MAX_PIPES]; |
3873218f JM |
804 | |
805 | /* Slice/subslice/EU info */ | |
806 | u8 slice_total; | |
807 | u8 subslice_total; | |
808 | u8 subslice_per_slice; | |
809 | u8 eu_total; | |
810 | u8 eu_per_subslice; | |
33e141ed | 811 | u8 min_eu_in_pool; |
b7668791 DL |
812 | /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ |
813 | u8 subslice_7eu[3]; | |
3873218f JM |
814 | u8 has_slice_pg:1; |
815 | u8 has_subslice_pg:1; | |
816 | u8 has_eu_pg:1; | |
82cf435b LL |
817 | |
818 | struct color_luts { | |
819 | u16 degamma_lut_size; | |
820 | u16 gamma_lut_size; | |
821 | } color; | |
cfdf1fa2 KH |
822 | }; |
823 | ||
a587f779 DL |
824 | #undef DEFINE_FLAG |
825 | #undef SEP_SEMICOLON | |
826 | ||
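/*
 * Worked example (editor's illustration): DEV_INFO_FOR_EACH_FLAG() is an
 * X-macro; with the (DEFINE_FLAG, SEP_SEMICOLON) pair above it expands
 * inside struct intel_device_info to
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...and so on for every flag,
 *
 * and a different (func, sep) pair can reuse the same list, e.g. to
 * print each flag by name.
 */
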
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @flags: context specific flags:
 *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @ppgtt: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *		initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct i915_gem_context {
	struct kref ref;
	struct drm_i915_private *i915;
	struct drm_i915_file_private *file_priv;
	struct i915_hw_ppgtt *ppgtt;

	struct i915_ctx_hang_stats hang_stats;

	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	BIT(1)
	/* Unique identifier for this context, used by the hw for tracking */
	unsigned hw_id;
	u32 user_handle;

	u32 ggtt_alignment;

	struct intel_context {
		struct drm_i915_gem_object *state;
		struct intel_ring *ring;
		struct i915_vma *lrc_vma;
		uint32_t *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
		bool initialised;
	} engine[I915_NUM_ENGINES];
	u32 ring_size;
	u32 desc_template;
	struct atomic_notifier_head status_notifier;
	bool execlists_force_single_submission;

	struct list_head link;

	u8 remap_slice;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;

	struct intel_fbc_state_cache {
		struct {
			unsigned int mode_flags;
			uint32_t hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
		} plane;

		struct {
			u64 ilk_ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
			unsigned int tiling_mode;
		} fb;
	} state_cache;

	struct intel_fbc_reg_params {
		struct {
			enum pipe pipe;
			enum plane plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			u64 ggtt_offset;
			uint32_t pixel_format;
			unsigned int stride;
			int fence_reg;
		} fb;

		int cfb_size;
	} params;

	struct intel_fbc_work {
		bool scheduled;
		u32 scheduled_vblank;
		struct work_struct work;
	} work;

	const char *no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
	bool link_standby;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,	/* Sunrisepoint PCH */
	PCH_KBP,	/* Kabypoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	u32 pm_intr_keep;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 boost_freq;		/* Frequency to request when wait boosting */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */

	u8 up_threshold;	/* Current %busy required to upclock */
	u8 down_threshold;	/* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	spinlock_t client_lock;
	struct list_head clients;
	bool client_boost;

	bool enabled;
	struct delayed_work autoenable_work;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/* the indicator for dispatch video commands on two BSD rings */
	unsigned int bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that a reset is in progress and even values mean that
	 * the (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGE bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when a hang is detected. Used by waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20
#define DP_AUX_D 0x30

#define DDC_PIN_B  0x05
#define DDC_PIN_C  0x04
#define DDC_PIN_D  0x06

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;

	uint8_t alternate_aux_channel;
	uint8_t alternate_ddc_pin;

	uint8_t dp_boost_level;
	uint8_t hdmi_boost_level;
};

bfd7ebda RV |
1455 | enum psr_lines_to_wait { |
1456 | PSR_0_LINES_TO_WAIT = 0, | |
1457 | PSR_1_LINE_TO_WAIT, | |
1458 | PSR_4_LINES_TO_WAIT, | |
1459 | PSR_8_LINES_TO_WAIT | |
83a7280e PB |
1460 | }; |
1461 | ||
41aa3448 RV |
1462 | struct intel_vbt_data { |
1463 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | |
1464 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | |
1465 | ||
1466 | /* Feature bits */ | |
1467 | unsigned int int_tv_support:1; | |
1468 | unsigned int lvds_dither:1; | |
1469 | unsigned int lvds_vbt:1; | |
1470 | unsigned int int_crt_support:1; | |
1471 | unsigned int lvds_use_ssc:1; | |
1472 | unsigned int display_clock_mode:1; | |
1473 | unsigned int fdi_rx_polarity_inverted:1; | |
3e845c7a | 1474 | unsigned int panel_type:4; |
41aa3448 RV |
1475 | int lvds_ssc_freq; |
1476 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | |
1477 | ||
83a7280e PB |
1478 | enum drrs_support_type drrs_type; |
1479 | ||
6aa23e65 JN |
1480 | struct { |
1481 | int rate; | |
1482 | int lanes; | |
1483 | int preemphasis; | |
1484 | int vswing; | |
06411f08 | 1485 | bool low_vswing; |
6aa23e65 JN |
1486 | bool initialized; |
1487 | bool support; | |
1488 | int bpp; | |
1489 | struct edp_power_seq pps; | |
1490 | } edp; | |
41aa3448 | 1491 | |
bfd7ebda RV |
1492 | struct { |
1493 | bool full_link; | |
1494 | bool require_aux_wakeup; | |
1495 | int idle_frames; | |
1496 | enum psr_lines_to_wait lines_to_wait; | |
1497 | int tp1_wakeup_time; | |
1498 | int tp2_tp3_wakeup_time; | |
1499 | } psr; | |
1500 | ||
f00076d2 JN |
1501 | struct { |
1502 | u16 pwm_freq_hz; | |
39fbc9c8 | 1503 | bool present; |
f00076d2 | 1504 | bool active_low_pwm; |
1de6068e | 1505 | u8 min_brightness; /* min_brightness/255 of max */ |
9a41e17d | 1506 | enum intel_backlight_type type; |
f00076d2 JN |
1507 | } backlight; |
1508 | ||
d17c5443 SK |
1509 | /* MIPI DSI */ |
1510 | struct { | |
1511 | u16 panel_id; | |
d3b542fc SK |
1512 | struct mipi_config *config; |
1513 | struct mipi_pps_data *pps; | |
1514 | u8 seq_version; | |
1515 | u32 size; | |
1516 | u8 *data; | |
8d3ed2f3 | 1517 | const u8 *sequence[MIPI_SEQ_MAX]; |
d17c5443 SK |
1518 | } dsi; |
1519 | ||
41aa3448 RV |
1520 | int crt_ddc_pin; |
1521 | ||
1522 | int child_dev_num; | |
768f69c9 | 1523 | union child_device_config *child_dev; |
6acab15a PZ |
1524 | |
1525 | struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; | |
9d6c875d | 1526 | struct sdvo_device_mapping sdvo_mappings[2]; |
41aa3448 RV |
1527 | }; |
1528 | ||
77c122bc VS |
1529 | enum intel_ddb_partitioning { |
1530 | INTEL_DDB_PART_1_2, | |
1531 | INTEL_DDB_PART_5_6, /* IVB+ */ | |
1532 | }; | |
1533 | ||
1fd527cc VS |
1534 | struct intel_wm_level { |
1535 | bool enable; | |
1536 | uint32_t pri_val; | |
1537 | uint32_t spr_val; | |
1538 | uint32_t cur_val; | |
1539 | uint32_t fbc_val; | |
1540 | }; | |
1541 | ||
820c1980 | 1542 | struct ilk_wm_values { |
609cedef VS |
1543 | uint32_t wm_pipe[3]; |
1544 | uint32_t wm_lp[3]; | |
1545 | uint32_t wm_lp_spr[3]; | |
1546 | uint32_t wm_linetime[3]; | |
1547 | bool enable_fbc_wm; | |
1548 | enum intel_ddb_partitioning partitioning; | |
1549 | }; | |
1550 | ||
262cd2e1 VS |
1551 | struct vlv_pipe_wm { |
1552 | uint16_t primary; | |
1553 | uint16_t sprite[2]; | |
1554 | uint8_t cursor; | |
1555 | }; | |
ae80152d | 1556 | |
262cd2e1 VS |
1557 | struct vlv_sr_wm { |
1558 | uint16_t plane; | |
1559 | uint8_t cursor; | |
1560 | }; | |
ae80152d | 1561 | |
262cd2e1 VS |
1562 | struct vlv_wm_values { |
1563 | struct vlv_pipe_wm pipe[3]; | |
1564 | struct vlv_sr_wm sr; | |
0018fda1 VS |
1565 | struct { |
1566 | uint8_t cursor; | |
1567 | uint8_t sprite[2]; | |
1568 | uint8_t primary; | |
1569 | } ddl[3]; | |
6eb1a681 VS |
1570 | uint8_t level; |
1571 | bool cxsr; | |
0018fda1 VS |
1572 | }; |
1573 | ||
c193924e | 1574 | struct skl_ddb_entry { |
16160e3d | 1575 | uint16_t start, end; /* in number of blocks, 'end' is exclusive */ |
c193924e DL |
1576 | }; |
1577 | ||
1578 | static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry) | |
1579 | { | |
16160e3d | 1580 | return entry->end - entry->start; |
c193924e DL |
1581 | } |
1582 | ||
08db6652 DL |
1583 | static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, |
1584 | const struct skl_ddb_entry *e2) | |
1585 | { | |
1586 | if (e1->start == e2->start && e1->end == e2->end) | |
1587 | return true; | |
1588 | ||
1589 | return false; | |
1590 | } | |
1591 | ||
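/*
 * Worked example (illustrative): since 'end' is exclusive, an entry of
 * { .start = 0, .end = 160 } spans blocks 0..159 and
 * skl_ddb_entry_size() returns 160.
 */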
c193924e | 1592 | struct skl_ddb_allocation { |
34bb56af | 1593 | struct skl_ddb_entry pipe[I915_MAX_PIPES]; |
2cd601c6 | 1594 | struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ |
4969d33e | 1595 | struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; |
c193924e DL |
1596 | }; |
1597 | ||
2ac96d2a | 1598 | struct skl_wm_values { |
2b4b9f35 | 1599 | unsigned dirty_pipes; |
c193924e | 1600 | struct skl_ddb_allocation ddb; |
2ac96d2a PB |
1601 | uint32_t wm_linetime[I915_MAX_PIPES]; |
1602 | uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; | |
2ac96d2a | 1603 | uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; |
2ac96d2a PB |
1604 | }; |
1605 | ||
1606 | struct skl_wm_level { | |
1607 | bool plane_en[I915_MAX_PLANES]; | |
1608 | uint16_t plane_res_b[I915_MAX_PLANES]; | |
1609 | uint8_t plane_res_l[I915_MAX_PLANES]; | |
2ac96d2a PB |
1610 | }; |
1611 | ||
c67a470b | 1612 | /* |
765dab67 PZ |
1613 | * This struct helps tracking the state needed for runtime PM, which puts the |
1614 | * device in PCI D3 state. Notice that when this happens, nothing on the | |
1615 | * graphics device works, even register access, so we don't get interrupts nor | |
1616 | * anything else. | |
c67a470b | 1617 | * |
765dab67 PZ |
1618 | * Every piece of our code that needs to actually touch the hardware needs to |
1619 | * either call intel_runtime_pm_get or call intel_display_power_get with the | |
1620 | * appropriate power domain. | |
a8a8bd54 | 1621 | * |
765dab67 PZ |
1622 | * Our driver uses the autosuspend delay feature, which means we'll only really |
1623 | * suspend if we stay at zero refcount for a certain amount of time. The | |
f458ebbc | 1624 | * default value is currently very conservative (see intel_runtime_pm_enable), but |
765dab67 | 1625 | * it can be changed with the standard runtime PM files from sysfs. |
c67a470b PZ |
1626 | * |
1627 | * The irqs_disabled variable becomes true exactly after we disable the IRQs and | |
1628 | * goes back to false exactly before we reenable the IRQs. We use this variable | |
1629 | * to check if someone is trying to enable/disable IRQs while they're supposed | |
1630 | * to be disabled. This shouldn't happen, and we'll print some error messages in | |
730488b2 | 1631 | * case it does. |
c67a470b | 1632 | * |
765dab67 | 1633 | * For more, read Documentation/power/runtime_pm.txt. |
c67a470b | 1634 | */ |
5d584b2e | 1635 | struct i915_runtime_pm { |
1f814dac | 1636 | atomic_t wakeref_count; |
2b19efeb | 1637 | atomic_t atomic_seq; |
5d584b2e | 1638 | bool suspended; |
2aeb7d3a | 1639 | bool irqs_enabled; |
c67a470b PZ |
1640 | }; |
1641 | ||
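/*
 * Illustrative usage sketch (a minimal pattern built from the helpers
 * named in the comment above; not a definitive recipe):
 *
 *	intel_runtime_pm_get(dev_priv);
 *	... touch hardware registers ...
 *	intel_runtime_pm_put(dev_priv);
 *
 * Display-only paths would instead bracket the access with
 * intel_display_power_get()/intel_display_power_put() and the
 * appropriate power domain.
 */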
926321d5 SV |
1642 | enum intel_pipe_crc_source { |
1643 | INTEL_PIPE_CRC_SOURCE_NONE, | |
1644 | INTEL_PIPE_CRC_SOURCE_PLANE1, | |
1645 | INTEL_PIPE_CRC_SOURCE_PLANE2, | |
1646 | INTEL_PIPE_CRC_SOURCE_PF, | |
5b3a856b | 1647 | INTEL_PIPE_CRC_SOURCE_PIPE, |
3d099a05 SV |
1648 | /* TV/DP on pre-gen5/vlv can't use the pipe source. */ |
1649 | INTEL_PIPE_CRC_SOURCE_TV, | |
1650 | INTEL_PIPE_CRC_SOURCE_DP_B, | |
1651 | INTEL_PIPE_CRC_SOURCE_DP_C, | |
1652 | INTEL_PIPE_CRC_SOURCE_DP_D, | |
46a19188 | 1653 | INTEL_PIPE_CRC_SOURCE_AUTO, |
926321d5 SV |
1654 | INTEL_PIPE_CRC_SOURCE_MAX, |
1655 | }; | |
1656 | ||
8bf1e9f1 | 1657 | struct intel_pipe_crc_entry { |
ac2300d4 | 1658 | uint32_t frame; |
8bf1e9f1 SH |
1659 | uint32_t crc[5]; |
1660 | }; | |
1661 | ||
b2c88f5b | 1662 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 |
8bf1e9f1 | 1663 | struct intel_pipe_crc { |
d538bbdf DL |
1664 | spinlock_t lock; |
1665 | bool opened; /* exclusive access to the result file */ | |
e5f75aca | 1666 | struct intel_pipe_crc_entry *entries; |
926321d5 | 1667 | enum intel_pipe_crc_source source; |
d538bbdf | 1668 | int head, tail; |
07144428 | 1669 | wait_queue_head_t wq; |
8bf1e9f1 SH |
1670 | }; |
1671 | ||
f99d7069 SV |
1672 | struct i915_frontbuffer_tracking { |
1673 | struct mutex lock; | |
1674 | ||
1675 | /* | |
1676 | * Tracking bits for delayed frontbuffer flushing due to GPU activity or | |
1677 | * scheduled flips. | |
1678 | */ | |
1679 | unsigned busy_bits; | |
1680 | unsigned flip_bits; | |
1681 | }; | |
1682 | ||
7225342a | 1683 | struct i915_wa_reg { |
f0f59a00 | 1684 | i915_reg_t addr; |
7225342a MK |
1685 | u32 value; |
1686 | /* bitmask representing WA bits */ | |
1687 | u32 mask; | |
1688 | }; | |
1689 | ||
33136b06 AS |
1690 | /* |
1691 | * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only | |
1692 | * allowing it for RCS as we don't foresee any requirement of having | |
1693 | * a whitelist for other engines. When it is really required for | |
1694 | * other engines, the limit needs to be increased. | |
1695 | */ | |
1696 | #define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) | |
7225342a MK |
1697 | |
1698 | struct i915_workarounds { | |
1699 | struct i915_wa_reg reg[I915_MAX_WA_REGS]; | |
1700 | u32 count; | |
666796da | 1701 | u32 hw_whitelist_count[I915_NUM_ENGINES]; |
7225342a MK |
1702 | }; |
1703 | ||
cf9d2890 YZ |
1704 | struct i915_virtual_gpu { |
1705 | bool active; | |
1706 | }; | |
1707 | ||
aa363136 MR |
1708 | /* used in computing the new watermarks state */ |
1709 | struct intel_wm_config { | |
1710 | unsigned int num_pipes_active; | |
1711 | bool sprites_enabled; | |
1712 | bool sprites_scaled; | |
1713 | }; | |
1714 | ||
77fec556 | 1715 | struct drm_i915_private { |
8f460e2c CW |
1716 | struct drm_device drm; |
1717 | ||
efab6d8d | 1718 | struct kmem_cache *objects; |
e20d2ab7 | 1719 | struct kmem_cache *vmas; |
efab6d8d | 1720 | struct kmem_cache *requests; |
f4c956ad | 1721 | |
5c969aa7 | 1722 | const struct intel_device_info info; |
f4c956ad SV |
1723 | |
1724 | int relative_constants_mode; | |
1725 | ||
1726 | void __iomem *regs; | |
1727 | ||
907b28c5 | 1728 | struct intel_uncore uncore; |
f4c956ad | 1729 | |
cf9d2890 YZ |
1730 | struct i915_virtual_gpu vgpu; |
1731 | ||
0ad35fed ZW |
1732 | struct intel_gvt gvt; |
1733 | ||
33a732f4 AD |
1734 | struct intel_guc guc; |
1735 | ||
eb805623 SV |
1736 | struct intel_csr csr; |
1737 | ||
5ea6e5e3 | 1738 | struct intel_gmbus gmbus[GMBUS_NUM_PINS]; |
28c70f16 | 1739 | |
f4c956ad SV |
1740 | /** gmbus_mutex protects against concurrent usage of the single hw gmbus |
1741 | * controller on different i2c buses. */ | |
1742 | struct mutex gmbus_mutex; | |
1743 | ||
1744 | /** | |
1745 | * Base address of the gmbus and gpio block. | |
1746 | */ | |
1747 | uint32_t gpio_mmio_base; | |
1748 | ||
b6fdd0f2 SS |
1749 | /* MMIO base address for MIPI regs */ |
1750 | uint32_t mipi_mmio_base; | |
1751 | ||
443a389f VS |
1752 | uint32_t psr_mmio_base; |
1753 | ||
28c70f16 SV |
1754 | wait_queue_head_t gmbus_wait_queue; |
1755 | ||
f4c956ad | 1756 | struct pci_dev *bridge_dev; |
0ca5fa3a | 1757 | struct i915_gem_context *kernel_context; |
666796da | 1758 | struct intel_engine_cs engine[I915_NUM_ENGINES]; |
3e78998a | 1759 | struct drm_i915_gem_object *semaphore_obj; |
ddf07be7 | 1760 | u32 next_seqno; |
f4c956ad | 1761 | |
ba8286fa | 1762 | struct drm_dma_handle *status_page_dmah; |
f4c956ad SV |
1763 | struct resource mch_res; |
1764 | ||
f4c956ad SV |
1765 | /* protects the irq masks */ |
1766 | spinlock_t irq_lock; | |
1767 | ||
84c33a64 SG |
1768 | /* protects the mmio flip data */ |
1769 | spinlock_t mmio_flip_lock; | |
1770 | ||
f8b79e58 ID |
1771 | bool display_irqs_enabled; |
1772 | ||
9ee32fea SV |
1773 | /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
1774 | struct pm_qos_request pm_qos; | |
1775 | ||
a580516d VS |
1776 | /* Sideband mailbox protection */ |
1777 | struct mutex sb_lock; | |
f4c956ad SV |
1778 | |
1779 | /** Cached value of IMR to avoid reads in updating the bitfield */ | |
abd58f01 BW |
1780 | union { |
1781 | u32 irq_mask; | |
1782 | u32 de_irq_mask[I915_MAX_PIPES]; | |
1783 | }; | |
f4c956ad | 1784 | u32 gt_irq_mask; |
605cd25b | 1785 | u32 pm_irq_mask; |
a6706b45 | 1786 | u32 pm_rps_events; |
91d181dd | 1787 | u32 pipestat_irq_mask[I915_MAX_PIPES]; |
f4c956ad | 1788 | |
5fcece80 | 1789 | struct i915_hotplug hotplug; |
ab34a7e8 | 1790 | struct intel_fbc fbc; |
439d7ac0 | 1791 | struct i915_drrs drrs; |
f4c956ad | 1792 | struct intel_opregion opregion; |
41aa3448 | 1793 | struct intel_vbt_data vbt; |
f4c956ad | 1794 | |
d9ceb816 JB |
1795 | bool preserve_bios_swizzle; |
1796 | ||
f4c956ad SV |
1797 | /* overlay */ |
1798 | struct intel_overlay *overlay; | |
f4c956ad | 1799 | |
58c68779 | 1800 | /* backlight registers and fields in struct intel_panel */ |
07f11d49 | 1801 | struct mutex backlight_lock; |
31ad8ec6 | 1802 | |
f4c956ad | 1803 | /* LVDS info */ |
f4c956ad SV |
1804 | bool no_aux_handshake; |
1805 | ||
e39b999a VS |
1806 | /* protects panel power sequencer state */ |
1807 | struct mutex pps_mutex; | |
1808 | ||
f4c956ad | 1809 | struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
f4c956ad SV |
1810 | int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
1811 | ||
1812 | unsigned int fsb_freq, mem_freq, is_ddr3; | |
b2045352 | 1813 | unsigned int skl_preferred_vco_freq; |
1a617b77 | 1814 | unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; |
adafdc6f | 1815 | unsigned int max_dotclk_freq; |
e7dc33f3 | 1816 | unsigned int rawclk_freq; |
6bcda4f0 | 1817 | unsigned int hpll_freq; |
bfa7df01 | 1818 | unsigned int czclk_freq; |
f4c956ad | 1819 | |
63911d72 | 1820 | struct { |
709e05c3 | 1821 | unsigned int vco, ref; |
63911d72 VS |
1822 | } cdclk_pll; |
1823 | ||
645416f5 SV |
1824 | /** |
1825 | * wq - Driver workqueue for GEM. | |
1826 | * | |
1827 | * NOTE: Work items scheduled here are not allowed to grab any modeset | |
1828 | * locks, for otherwise the flushing done in the pageflip code will | |
1829 | * result in deadlocks. | |
1830 | */ | |
f4c956ad SV |
1831 | struct workqueue_struct *wq; |
1832 | ||
1833 | /* Display functions */ | |
1834 | struct drm_i915_display_funcs display; | |
1835 | ||
1836 | /* PCH chipset type */ | |
1837 | enum intel_pch pch_type; | |
17a303ec | 1838 | unsigned short pch_id; |
f4c956ad SV |
1839 | |
1840 | unsigned long quirks; | |
1841 | ||
b8efb17b ZR |
1842 | enum modeset_restore modeset_restore; |
1843 | struct mutex modeset_restore_lock; | |
e2c8b870 | 1844 | struct drm_atomic_state *modeset_restore_state; |
673a394b | 1845 | |
a7bbbd63 | 1846 | struct list_head vm_list; /* Global list of all address spaces */ |
62106b4f | 1847 | struct i915_ggtt ggtt; /* VM representing the global address space */ |
5d4545ae | 1848 | |
4b5aed62 | 1849 | struct i915_gem_mm mm; |
ad46cb53 CW |
1850 | DECLARE_HASHTABLE(mm_structs, 7); |
1851 | struct mutex mm_lock; | |
8781342d | 1852 | |
5d1808ec CW |
1853 | /* The hw wants to have a stable context identifier for the lifetime |
1854 | * of the context (for OA, PASID, faults, etc). This is limited | |
1855 | * in execlists to 21 bits. | |
1856 | */ | |
1857 | struct ida context_hw_ida; | |
1858 | #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ | |
1859 | ||
8781342d SV |
1860 | /* Kernel Modesetting */ |
1861 | ||
76c4ac04 DL |
1862 | struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; |
1863 | struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; | |
6b95a207 KH |
1864 | wait_queue_head_t pending_flip_queue; |
1865 | ||
c4597872 SV |
1866 | #ifdef CONFIG_DEBUG_FS |
1867 | struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; | |
1868 | #endif | |
1869 | ||
565602d7 | 1870 | /* dpll and cdclk state is protected by connection_mutex */ |
e72f9fbf SV |
1871 | int num_shared_dpll; |
1872 | struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; | |
f9476a6c | 1873 | const struct intel_dpll_mgr *dpll_mgr; |
565602d7 | 1874 | |
fbf6d879 ML |
1875 | /* |
1876 | * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. | |
1877 | * Must be global rather than per dpll, because on some platforms | |
1878 | * plls share registers. | |
1879 | */ | |
1880 | struct mutex dpll_lock; | |
1881 | ||
565602d7 ML |
1882 | unsigned int active_crtcs; |
1883 | unsigned int min_pixclk[I915_MAX_PIPES]; | |
1884 | ||
e4607fcf | 1885 | int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; |
ee7b9f93 | 1886 | |
7225342a | 1887 | struct i915_workarounds workarounds; |
888b5995 | 1888 | |
f99d7069 SV |
1889 | struct i915_frontbuffer_tracking fb_tracking; |
1890 | ||
652c393a | 1891 | u16 orig_clock; |
f97108d1 | 1892 | |
c4804411 | 1893 | bool mchbar_need_disable; |
f97108d1 | 1894 | |
a4da4fa4 SV |
1895 | struct intel_l3_parity l3_parity; |
1896 | ||
59124506 | 1897 | /* Cannot be determined by PCIID. You must always read a register. */ |
3accaf7e | 1898 | u32 edram_cap; |
59124506 | 1899 | |
c6a828d3 | 1900 | /* gen6+ rps state */ |
c85aa885 | 1901 | struct intel_gen6_power_mgmt rps; |
c6a828d3 | 1902 | |
20e4d407 SV |
1903 | /* ilk-only ips/rps state. Everything in here is protected by the global |
1904 | * mchdev_lock in intel_pm.c */ | |
c85aa885 | 1905 | struct intel_ilk_power_mgmt ips; |
b5e50c3f | 1906 | |
83c00f55 | 1907 | struct i915_power_domains power_domains; |
a38911a3 | 1908 | |
a031d709 | 1909 | struct i915_psr psr; |
3f51e471 | 1910 | |
99584db3 | 1911 | struct i915_gpu_error gpu_error; |
ae681d96 | 1912 | |
c9cddffc JB |
1913 | struct drm_i915_gem_object *vlv_pctx; |
1914 | ||
0695726e | 1915 | #ifdef CONFIG_DRM_FBDEV_EMULATION |
8be48d92 DA |
1916 | /* list of fbdev register on this device */ |
1917 | struct intel_fbdev *fbdev; | |
82e3b8c1 | 1918 | struct work_struct fbdev_suspend_work; |
4520f53a | 1919 | #endif |
e953fd7b CW |
1920 | |
1921 | struct drm_property *broadcast_rgb_property; | |
3f43c48d | 1922 | struct drm_property *force_audio_property; |
e3689190 | 1923 | |
58fddc28 | 1924 | /* hda/i915 audio component */ |
51e1d83c | 1925 | struct i915_audio_component *audio_component; |
58fddc28 | 1926 | bool audio_component_registered; |
4a21ef7d LY |
1927 | /** |
1928 | * av_mutex - mutex for audio/video sync | |
1929 | * | |
1930 | */ | |
1931 | struct mutex av_mutex; | |
58fddc28 | 1932 | |
254f965c | 1933 | uint32_t hw_context_size; |
a33afea5 | 1934 | struct list_head context_list; |
f4c956ad | 1935 | |
3e68320e | 1936 | u32 fdi_rx_config; |
68d18ad7 | 1937 | |
c231775c | 1938 | /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ |
70722468 | 1939 | u32 chv_phy_control; |
c231775c VS |
1940 | /* |
1941 | * Shadows for CHV DPLL_MD regs to keep the state | |
1942 | * checker somewhat working in the presence of hardware | |
1943 | * crappiness (can't read out DPLL_MD for pipes B & C). | |
1944 | */ | |
1945 | u32 chv_dpll_md[I915_MAX_PIPES]; | |
adc7f04b | 1946 | u32 bxt_phy_grc; |
70722468 | 1947 | |
842f1c8b | 1948 | u32 suspend_count; |
bc87229f | 1949 | bool suspended_to_idle; |
f4c956ad | 1950 | struct i915_suspend_saved_registers regfile; |
ddeea5b0 | 1951 | struct vlv_s0ix_state vlv_s0ix_state; |
231f42a4 | 1952 | |
53615a5e VS |
1953 | struct { |
1954 | /* | |
1955 | * Raw watermark latency values: | |
1956 | * in 0.1us units for WM0, | |
1957 | * in 0.5us units for WM1+. | |
1958 | */ | |
1959 | /* primary */ | |
1960 | uint16_t pri_latency[5]; | |
1961 | /* sprite */ | |
1962 | uint16_t spr_latency[5]; | |
1963 | /* cursor */ | |
1964 | uint16_t cur_latency[5]; | |
2af30a5c PB |
1965 | /* |
1966 | * Raw watermark memory latency values | |
1967 | * for SKL for all 8 levels | |
1968 | * in 1us units. | |
1969 | */ | |
1970 | uint16_t skl_latency[8]; | |
609cedef | 1971 | |
2d41c0b5 PB |
1972 | /* |
1973 | * The skl_wm_values structure is a bit too big for stack | |
1974 | * allocation, so we keep the staging struct where we store | |
1975 | * intermediate results here instead. | |
1976 | */ | |
1977 | struct skl_wm_values skl_results; | |
1978 | ||
609cedef | 1979 | /* current hardware state */ |
2d41c0b5 PB |
1980 | union { |
1981 | struct ilk_wm_values hw; | |
1982 | struct skl_wm_values skl_hw; | |
0018fda1 | 1983 | struct vlv_wm_values vlv; |
2d41c0b5 | 1984 | }; |
58590c14 VS |
1985 | |
1986 | uint8_t max_level; | |
ed4a6a7c MR |
1987 | |
1988 | /* | |
1989 | * Should be held around atomic WM register writing; also | |
1990 | * protects intel_crtc->wm.active and | |
1991 | * cstate->wm.need_postvbl_update. | |
1992 | */ | |
1993 | struct mutex wm_mutex; | |
279e99d7 MR |
1994 | |
1995 | /* | |
1996 | * Set during HW readout of watermarks/DDB. Some platforms | |
1997 | * need to know when we're still using BIOS-provided values | |
1998 | * (which we don't fully trust). | |
1999 | */ | |
2000 | bool distrust_bios_wm; | |
53615a5e VS |
2001 | } wm; |
2002 | ||
8a187455 PZ |
2003 | struct i915_runtime_pm pm; |
2004 | ||
a83014d3 OM |
2005 | /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
2006 | struct { | |
117897f4 TU |
2007 | void (*cleanup_engine)(struct intel_engine_cs *engine); |
2008 | void (*stop_engine)(struct intel_engine_cs *engine); | |
67d97da3 CW |
2009 | |
2010 | /** | |
2011 | * Is the GPU currently considered idle, or busy executing | |
2012 | * userspace requests? Whilst idle, we allow runtime power | |
2013 | * management to power down the hardware and display clocks. | |
2014 | * In order to reduce the effect on performance, there | |
2015 | * is a slight delay before we do so. | |
2016 | */ | |
2017 | unsigned int active_engines; | |
2018 | bool awake; | |
2019 | ||
2020 | /** | |
2021 | * We leave the user IRQ off as much as possible, | |
2022 | * but this means that requests will finish and never | |
2023 | * be retired once the system goes idle. Set a timer to | |
2024 | * fire periodically while the ring is running. When it | |
2025 | * fires, go retire requests. | |
2026 | */ | |
2027 | struct delayed_work retire_work; | |
2028 | ||
2029 | /** | |
2030 | * When we detect an idle GPU, we want to turn on | |
2031 | * powersaving features. So once we see that there | |
2032 | * are no more requests outstanding and no more | |
2033 | * arrive within a small period of time, we fire | |
2034 | * off the idle_work. | |
2035 | */ | |
2036 | struct delayed_work idle_work; | |
a83014d3 OM |
2037 | } gt; |
2038 | ||
3be60de9 VS |
2039 | /* perform PHY state sanity checks? */ |
2040 | bool chv_phy_assert[2]; | |
2041 | ||
0bdf5a05 TI |
2042 | struct intel_encoder *dig_port_map[I915_MAX_PORTS]; |
2043 | ||
bdf1e7e3 SV |
2044 | /* |
2045 | * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch | |
2046 | * will be rejected. Instead look for a better place. | |
2047 | */ | |
77fec556 | 2048 | }; |
1da177e4 | 2049 | |
2c1792a1 CW |
2050 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
2051 | { | |
091387c1 | 2052 | return container_of(dev, struct drm_i915_private, drm); |
2c1792a1 CW |
2053 | } |
2054 | ||
888d0d42 ID |
2055 | static inline struct drm_i915_private *dev_to_i915(struct device *dev) |
2056 | { | |
2057 | return to_i915(dev_get_drvdata(dev)); | |
2058 | } | |
2059 | ||
33a732f4 AD |
2060 | static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) |
2061 | { | |
2062 | return container_of(guc, struct drm_i915_private, guc); | |
2063 | } | |
2064 | ||
b4ac5afc DG |
2065 | /* Simple iterator over all initialised engines */ |
2066 | #define for_each_engine(engine__, dev_priv__) \ | |
2067 | for ((engine__) = &(dev_priv__)->engine[0]; \ | |
2068 | (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ | |
2069 | (engine__)++) \ | |
2070 | for_each_if (intel_engine_initialized(engine__)) | |
b4519513 | 2071 | |
c3232b18 DG |
2072 | /* Iterator with engine_id */ |
2073 | #define for_each_engine_id(engine__, dev_priv__, id__) \ | |
2074 | for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \ | |
2075 | (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ | |
2076 | (engine__)++) \ | |
2077 | for_each_if (((id__) = (engine__)->id, \ | |
2078 | intel_engine_initialized(engine__))) | |
2079 | ||
2080 | /* Iterator over subset of engines selected by mask */ | |
ee4b6faf | 2081 | #define for_each_engine_masked(engine__, dev_priv__, mask__) \ |
b4ac5afc DG |
2082 | for ((engine__) = &(dev_priv__)->engine[0]; \ |
2083 | (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ | |
2084 | (engine__)++) \ | |
2085 | for_each_if (((mask__) & intel_engine_flag(engine__)) && \ | |
2086 | intel_engine_initialized(engine__)) | |
ee4b6faf | 2087 | |
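/*
 * Illustrative usage (sketch only): walking every initialised engine,
 * or a masked subset of them.
 *
 *	struct intel_engine_cs *engine;
 *
 *	for_each_engine(engine, dev_priv)
 *		intel_engine_init_hangcheck(engine);
 *
 *	for_each_engine_masked(engine, dev_priv, ALL_ENGINES)
 *		... only engines selected by the mask ...
 */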
b1d7e4b4 WF |
2088 | enum hdmi_force_audio { |
2089 | HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ | |
2090 | HDMI_AUDIO_OFF, /* force turn off HDMI audio */ | |
2091 | HDMI_AUDIO_AUTO, /* trust EDID */ | |
2092 | HDMI_AUDIO_ON, /* force turn on HDMI audio */ | |
2093 | }; | |
2094 | ||
190d6cd5 | 2095 | #define I915_GTT_OFFSET_NONE ((u32)-1) |
ed2f3452 | 2096 | |
37e680a1 | 2097 | struct drm_i915_gem_object_ops { |
de472664 CW |
2098 | unsigned int flags; |
2099 | #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 | |
2100 | ||
37e680a1 CW |
2101 | /* Interface between the GEM object and its backing storage. |
2102 | * get_pages() is called once prior to the use of the associated set | |
2103 | * of pages, before binding them into the GTT, and put_pages() is | |
2104 | * called after we no longer need them. As we expect there to be | |
2105 | * associated cost with migrating pages between the backing storage | |
2106 | * and making them available for the GPU (e.g. clflush), we may hold | |
2107 | * onto the pages after they are no longer referenced by the GPU | |
2108 | * in case they may be used again shortly (for example migrating the | |
2109 | * pages to a different memory domain within the GTT). put_pages() | |
2110 | * will therefore most likely be called when the object itself is | |
2111 | * being released or under memory pressure (where we attempt to | |
2112 | * reap pages for the shrinker). | |
2113 | */ | |
2114 | int (*get_pages)(struct drm_i915_gem_object *); | |
2115 | void (*put_pages)(struct drm_i915_gem_object *); | |
de472664 | 2116 | |
5cc9ed4b CW |
2117 | int (*dmabuf_export)(struct drm_i915_gem_object *); |
2118 | void (*release)(struct drm_i915_gem_object *); | |
37e680a1 CW |
2119 | }; |
2120 | ||
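/*
 * Illustrative sketch (hypothetical backend, not a real ops table in
 * the driver): a backing store with struct pages would look roughly
 * like
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 *
 * where example_get_pages()/example_put_pages() are placeholders.
 */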
a071fa00 SV |
2121 | /* |
2122 | * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is | |
d1b9d039 | 2123 | * considered to be the frontbuffer for the given plane interface-wise. This |
a071fa00 SV |
2124 | * doesn't mean that the hw necessarily already scans it out, but that any |
2125 | * rendering (by the cpu or gpu) will land in the frontbuffer eventually. | |
2126 | * | |
2127 | * We have one bit per pipe and per scanout plane type. | |
2128 | */ | |
d1b9d039 SAK |
2129 | #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 |
2130 | #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 | |
a071fa00 SV |
2131 | #define INTEL_FRONTBUFFER_BITS \ |
2132 | (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) | |
2133 | #define INTEL_FRONTBUFFER_PRIMARY(pipe) \ | |
2134 | (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) | |
2135 | #define INTEL_FRONTBUFFER_CURSOR(pipe) \ | |
d1b9d039 SAK |
2136 | (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
2137 | #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ | |
2138 | (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) | |
a071fa00 | 2139 | #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ |
d1b9d039 | 2140 | (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
cc36513c | 2141 | #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ |
d1b9d039 | 2142 | (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
a071fa00 | 2143 | |
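/*
 * Worked example (illustrative): with 8 bits per pipe, the primary
 * plane of pipe B (pipe == 1) is INTEL_FRONTBUFFER_PRIMARY(1) ==
 * 1 << 8, and INTEL_FRONTBUFFER_ALL_MASK(1) == 0xff << 8 covers all
 * of that pipe's bits.
 */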
673a394b | 2144 | struct drm_i915_gem_object { |
c397b908 | 2145 | struct drm_gem_object base; |
673a394b | 2146 | |
37e680a1 CW |
2147 | const struct drm_i915_gem_object_ops *ops; |
2148 | ||
2f633156 BW |
2149 | /** List of VMAs backed by this object */ |
2150 | struct list_head vma_list; | |
2151 | ||
c1ad11fc CW |
2152 | /** Stolen memory for this object, instead of being backed by shmem. */ |
2153 | struct drm_mm_node *stolen; | |
35c20a60 | 2154 | struct list_head global_list; |
673a394b | 2155 | |
117897f4 | 2156 | struct list_head engine_list[I915_NUM_ENGINES]; |
b25cb2f8 BW |
2157 | /** Used in execbuf to temporarily hold a ref */ |
2158 | struct list_head obj_exec_link; | |
673a394b | 2159 | |
8d9d5744 | 2160 | struct list_head batch_pool_link; |
493018dc | 2161 | |
673a394b | 2162 | /** |
65ce3027 CW |
2163 | * This is set if the object is on the active lists (has pending |
2164 | * rendering and so a non-zero seqno), and is not set if it is on the | |
2165 | * inactive (ready to be unbound) list. | |
673a394b | 2166 | */ |
666796da | 2167 | unsigned int active:I915_NUM_ENGINES; |
673a394b EA |
2168 | |
2169 | /** | |
2170 | * This is set if the object has been written to since last bound | |
2171 | * to the GTT | |
2172 | */ | |
0206e353 | 2173 | unsigned int dirty:1; |
778c3544 SV |
2174 | |
2175 | /** | |
2176 | * Fence register bits (if any) for this object. Will be set | |
2177 | * as needed when mapped into the GTT. | |
2178 | * Protected by dev->struct_mutex. | |
778c3544 | 2179 | */ |
4b9de737 | 2180 | signed int fence_reg:I915_MAX_NUM_FENCE_BITS; |
778c3544 | 2181 | |
778c3544 SV |
2182 | /** |
2183 | * Advice: are the backing pages purgeable? | |
2184 | */ | |
0206e353 | 2185 | unsigned int madv:2; |
778c3544 | 2186 | |
778c3544 SV |
2187 | /** |
2188 | * Current tiling mode for the object. | |
2189 | */ | |
0206e353 | 2190 | unsigned int tiling_mode:2; |
5d82e3e6 CW |
2191 | /** |
2192 | * Whether the tiling parameters for the currently associated fence | |
2193 | * register have changed. Note that for the purposes of tracking | |
2194 | * tiling changes we also treat the unfenced register, the register | |
2195 | * slot that the object occupies whilst it executes a fenced | |
2196 | * command (such as BLT on gen2/3), as a "fence". | |
2197 | */ | |
2198 | unsigned int fence_dirty:1; | |
778c3544 | 2199 | |
75e9e915 SV |
2200 | /** |
2201 | * Is the object at the current location in the gtt mappable and | |
2202 | * fenceable? Used to avoid costly recalculations. | |
2203 | */ | |
0206e353 | 2204 | unsigned int map_and_fenceable:1; |
75e9e915 | 2205 | |
fb7d516a SV |
2206 | /** |
2207 | * Whether the current gtt mapping needs to be mappable (and isn't just | |
2208 | * mappable by accident). Track pin and fault separate for a more | |
2209 | * accurate mappable working set. | |
2210 | */ | |
0206e353 | 2211 | unsigned int fault_mappable:1; |
fb7d516a | 2212 | |
24f3a8cf AG |
2213 | /* |
2214 | * Is the object to be mapped as read-only to the GPU | |
2215 | * Only honoured if hardware has relevant pte bit | |
2216 | */ | |
2217 | unsigned long gt_ro:1; | |
651d794f | 2218 | unsigned int cache_level:3; |
0f71979a | 2219 | unsigned int cache_dirty:1; |
93dfb40c | 2220 | |
a071fa00 SV |
2221 | unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; |
2222 | ||
aeecc969 | 2223 | unsigned int has_wc_mmap; |
8a0c39b1 TU |
2224 | unsigned int pin_display; |
2225 | ||
9da3da66 | 2226 | struct sg_table *pages; |
a5570178 | 2227 | int pages_pin_count; |
ee286370 CW |
2228 | struct get_page { |
2229 | struct scatterlist *sg; | |
2230 | int last; | |
2231 | } get_page; | |
0a798eb9 | 2232 | void *mapping; |
9a70cc2a | 2233 | |
b4716185 CW |
2234 | /** Breadcrumb of last rendering to the buffer. |
2235 | * There can only be one writer, but we allow for multiple readers. | |
2236 | * If there is a writer that necessarily implies that all other | |
2237 | * read requests are complete - but we may only be lazily clearing | |
2238 | * the read requests. A read request is naturally the most recent | |
2239 | * request on a ring, so we may have two different write and read | |
2240 | * requests on one ring where the write request is older than the | |
2241 | * read request. This allows for the CPU to read from an active | |
2242 | * buffer by only waiting for the write to complete. | |
2243 | */ | |
666796da | 2244 | struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; |
97b2a6a1 | 2245 | struct drm_i915_gem_request *last_write_req; |
caea7476 | 2246 | /** Breadcrumb of last fenced GPU access to the buffer. */ |
97b2a6a1 | 2247 | struct drm_i915_gem_request *last_fenced_req; |
673a394b | 2248 | |
778c3544 | 2249 | /** Current tiling stride for the object, if it's tiled. */ |
de151cf6 | 2250 | uint32_t stride; |
673a394b | 2251 | |
80075d49 SV |
2252 | /** References from framebuffers, locks out tiling changes. */ |
2253 | unsigned long framebuffer_references; | |
2254 | ||
280b713b | 2255 | /** Record of address bit 17 of each page at last unbind. */ |
d312ec25 | 2256 | unsigned long *bit_17; |
280b713b | 2257 | |
5cc9ed4b | 2258 | union { |
6a2c4232 CW |
2259 | /** for phy allocated objects */ |
2260 | struct drm_dma_handle *phys_handle; | |
2261 | ||
5cc9ed4b CW |
2262 | struct i915_gem_userptr { |
2263 | uintptr_t ptr; | |
2264 | unsigned read_only :1; | |
2265 | unsigned workers :4; | |
2266 | #define I915_GEM_USERPTR_MAX_WORKERS 15 | |
2267 | ||
ad46cb53 CW |
2268 | struct i915_mm_struct *mm; |
2269 | struct i915_mmu_object *mmu_object; | |
5cc9ed4b CW |
2270 | struct work_struct *work; |
2271 | } userptr; | |
2272 | }; | |
2273 | }; | |
03ac0642 CW |
2274 | |
2275 | static inline struct drm_i915_gem_object * | |
2276 | to_intel_bo(struct drm_gem_object *gem) | |
2277 | { | |
2278 | /* Assert that to_intel_bo(NULL) == NULL */ | |
2279 | BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base)); | |
2280 | ||
2281 | return container_of(gem, struct drm_i915_gem_object, base); | |
2282 | } | |
2283 | ||
2284 | static inline struct drm_i915_gem_object * | |
2285 | i915_gem_object_lookup(struct drm_file *file, u32 handle) | |
2286 | { | |
2287 | return to_intel_bo(drm_gem_object_lookup(file, handle)); | |
2288 | } | |
2289 | ||
2290 | __deprecated | |
2291 | extern struct drm_gem_object * | |
2292 | drm_gem_object_lookup(struct drm_file *file, u32 handle); | |
23010e43 | 2293 | |
25dc556a CW |
2294 | __attribute__((nonnull)) |
2295 | static inline struct drm_i915_gem_object * | |
2296 | i915_gem_object_get(struct drm_i915_gem_object *obj) | |
2297 | { | |
2298 | drm_gem_object_reference(&obj->base); | |
2299 | return obj; | |
2300 | } | |
2301 | ||
2302 | __deprecated | |
2303 | extern void drm_gem_object_reference(struct drm_gem_object *); | |
2304 | ||
f8c417cd CW |
2305 | __attribute__((nonnull)) |
2306 | static inline void | |
2307 | i915_gem_object_put(struct drm_i915_gem_object *obj) | |
2308 | { | |
2309 | drm_gem_object_unreference(&obj->base); | |
2310 | } | |
2311 | ||
2312 | __deprecated | |
2313 | extern void drm_gem_object_unreference(struct drm_gem_object *); | |
2314 | ||
34911fd3 CW |
2315 | __attribute__((nonnull)) |
2316 | static inline void | |
2317 | i915_gem_object_put_unlocked(struct drm_i915_gem_object *obj) | |
2318 | { | |
2319 | drm_gem_object_unreference_unlocked(&obj->base); | |
2320 | } | |
2321 | ||
2322 | __deprecated | |
2323 | extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *); | |
2324 | ||
b9bcd14a CW |
2325 | static inline bool |
2326 | i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) | |
2327 | { | |
2328 | return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; | |
2329 | } | |
2330 | ||
85d1225e DG |
2331 | /* |
2332 | * Optimised SGL iterator for GEM objects | |
2333 | */ | |
2334 | static __always_inline struct sgt_iter { | |
2335 | struct scatterlist *sgp; | |
2336 | union { | |
2337 | unsigned long pfn; | |
2338 | dma_addr_t dma; | |
2339 | }; | |
2340 | unsigned int curr; | |
2341 | unsigned int max; | |
2342 | } __sgt_iter(struct scatterlist *sgl, bool dma) { | |
2343 | struct sgt_iter s = { .sgp = sgl }; | |
2344 | ||
2345 | if (s.sgp) { | |
2346 | s.max = s.curr = s.sgp->offset; | |
2347 | s.max += s.sgp->length; | |
2348 | if (dma) | |
2349 | s.dma = sg_dma_address(s.sgp); | |
2350 | else | |
2351 | s.pfn = page_to_pfn(sg_page(s.sgp)); | |
2352 | } | |
2353 | ||
2354 | return s; | |
2355 | } | |
2356 | ||
63d15326 DG |
2357 | /** |
2358 | * __sg_next - return the next scatterlist entry in a list | |
2359 | * @sg: The current sg entry | |
2360 | * | |
2361 | * Description: | |
2362 | * If the entry is the last, return NULL; otherwise, step to the next | |
2363 | * element in the array (@sg@+1). If that's a chain pointer, follow it; | |
2364 | * otherwise just return the pointer to the current element. | |
2365 | **/ | |
2366 | static inline struct scatterlist *__sg_next(struct scatterlist *sg) | |
2367 | { | |
2368 | #ifdef CONFIG_DEBUG_SG | |
2369 | BUG_ON(sg->sg_magic != SG_MAGIC); | |
2370 | #endif | |
2371 | return sg_is_last(sg) ? NULL : | |
2372 | likely(!sg_is_chain(++sg)) ? sg : | |
2373 | sg_chain_ptr(sg); | |
2374 | } | |
2375 | ||
85d1225e DG |
2376 | /** |
2377 | * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table | |
2378 | * @__dmap: DMA address (output) | |
2379 | * @__iter: 'struct sgt_iter' (iterator state, internal) | |
2380 | * @__sgt: sg_table to iterate over (input) | |
2381 | */ | |
2382 | #define for_each_sgt_dma(__dmap, __iter, __sgt) \ | |
2383 | for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ | |
2384 | ((__dmap) = (__iter).dma + (__iter).curr); \ | |
2385 | (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ | |
63d15326 | 2386 | ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0)) |
85d1225e DG |
2387 | |
2388 | /** | |
2389 | * for_each_sgt_page - iterate over the pages of the given sg_table | |
2390 | * @__pp: page pointer (output) | |
2391 | * @__iter: 'struct sgt_iter' (iterator state, internal) | |
2392 | * @__sgt: sg_table to iterate over (input) | |
2393 | */ | |
2394 | #define for_each_sgt_page(__pp, __iter, __sgt) \ | |
2395 | for ((__iter) = __sgt_iter((__sgt)->sgl, false); \ | |
2396 | ((__pp) = (__iter).pfn == 0 ? NULL : \ | |
2397 | pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \ | |
2398 | (((__iter).curr += PAGE_SIZE) < (__iter).max) || \ | |
63d15326 | 2399 | ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0)) |
a071fa00 | 2400 | |
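/*
 * Illustrative usage (sketch only): iterating the backing pages or DMA
 * addresses of an object's sg_table.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	for_each_sgt_page(page, iter, obj->pages)
 *		... one struct page per PAGE_SIZE chunk ...
 *
 *	for_each_sgt_dma(dma, iter, obj->pages)
 *		... one DMA address per PAGE_SIZE chunk ...
 */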
351e3db2 BV |
2401 | /* |
2402 | * A command that requires special handling by the command parser. | |
2403 | */ | |
2404 | struct drm_i915_cmd_descriptor { | |
2405 | /* | |
2406 | * Flags describing how the command parser processes the command. | |
2407 | * | |
2408 | * CMD_DESC_FIXED: The command has a fixed length if this is set, | |
2409 | * a length mask if not set | |
2410 | * CMD_DESC_SKIP: The command is allowed but does not follow the | |
2411 | * standard length encoding for the opcode range in | |
2412 | * which it falls | |
2413 | * CMD_DESC_REJECT: The command is never allowed | |
2414 | * CMD_DESC_REGISTER: The command should be checked against the | |
2415 | * register whitelist for the appropriate ring | |
2416 | * CMD_DESC_MASTER: The command is allowed if the submitting process | |
2417 | * is the DRM master | |
2418 | */ | |
2419 | u32 flags; | |
2420 | #define CMD_DESC_FIXED (1<<0) | |
2421 | #define CMD_DESC_SKIP (1<<1) | |
2422 | #define CMD_DESC_REJECT (1<<2) | |
2423 | #define CMD_DESC_REGISTER (1<<3) | |
2424 | #define CMD_DESC_BITMASK (1<<4) | |
2425 | #define CMD_DESC_MASTER (1<<5) | |
2426 | ||
2427 | /* | |
2428 | * The command's unique identification bits and the bitmask to get them. | |
2429 | * This isn't strictly the opcode field as defined in the spec and may | |
2430 | * also include type, subtype, and/or subop fields. | |
2431 | */ | |
2432 | struct { | |
2433 | u32 value; | |
2434 | u32 mask; | |
2435 | } cmd; | |
2436 | ||
2437 | /* | |
2438 | * The command's length. The command is either fixed length (i.e. does | |
2439 | * not include a length field) or has a length field mask. The flag | |
2440 | * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has | |
2441 | * a length mask. All command entries in a command table must include | |
2442 | * length information. | |
2443 | */ | |
2444 | union { | |
2445 | u32 fixed; | |
2446 | u32 mask; | |
2447 | } length; | |
2448 | ||
2449 | /* | |
2450 | * Describes where to find a register address in the command to check | |
2451 | * against the ring's register whitelist. Only valid if flags has the | |
2452 | * CMD_DESC_REGISTER bit set. | |
6a65c5b9 FJ |
2453 | * |
2454 | * A non-zero step value implies that the command may access multiple | |
2455 | * registers in sequence (e.g. LRI), in that case step gives the | |
2456 | * distance in dwords between individual offset fields. | |
351e3db2 BV |
2457 | */ |
2458 | struct { | |
2459 | u32 offset; | |
2460 | u32 mask; | |
6a65c5b9 | 2461 | u32 step; |
351e3db2 BV |
2462 | } reg; |
2463 | ||
2464 | #define MAX_CMD_DESC_BITMASKS 3 | |
2465 | /* | |
2466 | * Describes command checks where a particular dword is masked and | |
2467 | * compared against an expected value. If the command does not match | |
2468 | * the expected value, the parser rejects it. Only valid if flags has | |
2469 | * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero | |
2470 | * are valid. | |
d4d48035 BV |
2471 | * |
2472 | * If the check specifies a non-zero condition_mask then the parser | |
2473 | * only performs the check when the bits specified by condition_mask | |
2474 | * are non-zero. | |
351e3db2 BV |
2475 | */ |
2476 | struct { | |
2477 | u32 offset; | |
2478 | u32 mask; | |
2479 | u32 expected; | |
d4d48035 BV |
2480 | u32 condition_offset; |
2481 | u32 condition_mask; | |
351e3db2 BV |
2482 | } bits[MAX_CMD_DESC_BITMASKS]; |
2483 | }; | |
2484 | ||
2485 | /* | |
2486 | * A table of commands requiring special handling by the command parser. | |
2487 | * | |
33a051a5 CW |
2488 | * Each engine has an array of tables. Each table consists of an array of |
2489 | * command descriptors, which must be sorted with command opcodes in | |
2490 | * ascending order. | |
351e3db2 BV |
2491 | */ |
2492 | struct drm_i915_cmd_table { | |
2493 | const struct drm_i915_cmd_descriptor *table; | |
2494 | int count; | |
2495 | }; | |
2496 | ||
dbbe9127 | 2497 | /* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */ |
7312e2dd CW |
2498 | #define __I915__(p) ({ \ |
2499 | struct drm_i915_private *__p; \ | |
2500 | if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ | |
2501 | __p = (struct drm_i915_private *)p; \ | |
2502 | else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ | |
2503 | __p = to_i915((struct drm_device *)p); \ | |
2504 | else \ | |
2505 | BUILD_BUG(); \ | |
2506 | __p; \ | |
2507 | }) | |
dbbe9127 | 2508 | #define INTEL_INFO(p) (&__I915__(p)->info) |
3f10e82f | 2509 | #define INTEL_GEN(p) (INTEL_INFO(p)->gen) |
87f1f465 | 2510 | #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) |
cae5852d | 2511 | |
e87a005d | 2512 | #define REVID_FOREVER 0xff |
091387c1 | 2513 | #define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision) |
ac657f64 TU |
2514 | |
2515 | #define GEN_FOREVER (0) | |
2516 | /* | |
2517 | * Returns true if Gen is in inclusive range [Start, End]. | |
2518 | * | |
2519 | * Use GEN_FOREVER for unbound start and/or end. | |
2520 | */ | |
2521 | #define IS_GEN(p, s, e) ({ \ | |
2522 | unsigned int __s = (s), __e = (e); \ | |
2523 | BUILD_BUG_ON(!__builtin_constant_p(s)); \ | |
2524 | BUILD_BUG_ON(!__builtin_constant_p(e)); \ | |
2525 | if ((__s) != GEN_FOREVER) \ | |
2526 | __s = (s) - 1; \ | |
2527 | if ((__e) == GEN_FOREVER) \ | |
2528 | __e = BITS_PER_LONG - 1; \ | |
2529 | else \ | |
2530 | __e = (e) - 1; \ | |
2531 | !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \ | |
2532 | }) | |
2533 | ||
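/*
 * Worked examples (illustrative): IS_GEN(dev_priv, 8, GEN_FOREVER) is
 * true for gen8 and anything newer, IS_GEN(dev_priv, GEN_FOREVER, 5)
 * for gen5 and anything older, and IS_GEN(dev_priv, 6, 7) only for
 * gen6 and gen7.
 */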
e87a005d JN |
2534 | /* |
2535 | * Return true if revision is in range [since,until] inclusive. | |
2536 | * | |
2537 | * Use 0 for open-ended since, and REVID_FOREVER for open-ended until. | |
2538 | */ | |
2539 | #define IS_REVID(p, since, until) \ | |
2540 | (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until)) | |
2541 | ||
87f1f465 CW |
2542 | #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) |
2543 | #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) | |
cae5852d | 2544 | #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
87f1f465 | 2545 | #define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) |
cae5852d | 2546 | #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
87f1f465 CW |
2547 | #define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) |
2548 | #define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) | |
cae5852d ZN |
2549 | #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
2550 | #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) | |
2551 | #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) | |
87f1f465 | 2552 | #define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) |
cae5852d | 2553 | #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
87f1f465 CW |
2554 | #define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) |
2555 | #define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) | |
cae5852d ZN |
2556 | #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
2557 | #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) | |
87f1f465 | 2558 | #define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) |
4b65177b | 2559 | #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
87f1f465 CW |
2560 | #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ |
2561 | INTEL_DEVID(dev) == 0x0152 || \ | |
2562 | INTEL_DEVID(dev) == 0x015a) | |
70a3eb7a | 2563 | #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
666a4537 | 2564 | #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) |
4cae9ae0 | 2565 | #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
ab0d24ac | 2566 | #define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell) |
7201c0b3 | 2567 | #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) |
7526ac19 | 2568 | #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) |
ef11bdb3 | 2569 | #define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) |
cae5852d | 2570 | #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
ed1c9e2c | 2571 | #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
87f1f465 | 2572 | (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
5dd8c4c3 | 2573 | #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
6b96d705 | 2574 | ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
0dc6f20b | 2575 | (INTEL_DEVID(dev) & 0xf) == 0xb || \ |
87f1f465 | 2576 | (INTEL_DEVID(dev) & 0xf) == 0xe)) |
ebb72aad VS |
2577 | /* ULX machines are also considered ULT. */ |
2578 | #define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ | |
2579 | (INTEL_DEVID(dev) & 0xf) == 0xe) | |
a0fcbd95 RV |
2580 | #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
2581 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | |
5dd8c4c3 | 2582 | #define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ |
87f1f465 | 2583 | (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) |
9435373e | 2584 | #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
87f1f465 | 2585 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
9bbfd20a | 2586 | /* ULX machines are also considered ULT. */ |
87f1f465 CW |
2587 | #define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ |
2588 | INTEL_DEVID(dev) == 0x0A1E) | |
f8896f5d DW |
2589 | #define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ |
2590 | INTEL_DEVID(dev) == 0x1913 || \ | |
2591 | INTEL_DEVID(dev) == 0x1916 || \ | |
2592 | INTEL_DEVID(dev) == 0x1921 || \ | |
2593 | INTEL_DEVID(dev) == 0x1926) | |
2594 | #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ | |
2595 | INTEL_DEVID(dev) == 0x1915 || \ | |
2596 | INTEL_DEVID(dev) == 0x191E) | |
a5b7991c RV |
2597 | #define IS_KBL_ULT(dev) (INTEL_DEVID(dev) == 0x5906 || \ |
2598 | INTEL_DEVID(dev) == 0x5913 || \ | |
2599 | INTEL_DEVID(dev) == 0x5916 || \ | |
2600 | INTEL_DEVID(dev) == 0x5921 || \ | |
2601 | INTEL_DEVID(dev) == 0x5926) | |
2602 | #define IS_KBL_ULX(dev) (INTEL_DEVID(dev) == 0x590E || \ | |
2603 | INTEL_DEVID(dev) == 0x5915 || \ | |
2604 | INTEL_DEVID(dev) == 0x591E) | |
7a58bad0 SAK |
2605 | #define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ |
2606 | (INTEL_DEVID(dev) & 0x00F0) == 0x0020) | |
2607 | #define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ | |
2608 | (INTEL_DEVID(dev) & 0x00F0) == 0x0030) | |
2609 | ||
b833d685 | 2610 | #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
cae5852d | 2611 | |
ef712bb4 JN |
2612 | #define SKL_REVID_A0 0x0 |
2613 | #define SKL_REVID_B0 0x1 | |
2614 | #define SKL_REVID_C0 0x2 | |
2615 | #define SKL_REVID_D0 0x3 | |
2616 | #define SKL_REVID_E0 0x4 | |
2617 | #define SKL_REVID_F0 0x5 | |
4ba9c1f7 MK |
2618 | #define SKL_REVID_G0 0x6 |
2619 | #define SKL_REVID_H0 0x7 | |
ef712bb4 | 2620 | |
e87a005d JN |
2621 | #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) |
2622 | ||
ef712bb4 | 2623 | #define BXT_REVID_A0 0x0 |
fffda3f4 | 2624 | #define BXT_REVID_A1 0x1 |
ef712bb4 JN |
2625 | #define BXT_REVID_B0 0x3 |
2626 | #define BXT_REVID_C0 0x9 | |
6c74c87f | 2627 | |
e87a005d JN |
2628 | #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until)) |
2629 | ||
c033a37c MK |
2630 | #define KBL_REVID_A0 0x0 |
2631 | #define KBL_REVID_B0 0x1 | |
fe905819 MK |
2632 | #define KBL_REVID_C0 0x2 |
2633 | #define KBL_REVID_D0 0x3 | |
2634 | #define KBL_REVID_E0 0x4 | |
c033a37c MK |
2635 | |
2636 | #define IS_KBL_REVID(p, since, until) \ | |
2637 | (IS_KABYLAKE(p) && IS_REVID(p, since, until)) | |
2638 | ||
85436696 JB |
2639 | /* |
2640 | * The genX designation typically refers to the render engine, so render | |
2641 | * capability related checks should use IS_GEN, while display and other checks | |
2642 | * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular | |
2643 | * chips, etc.). | |
2644 | */ | |
af1346a0 TU |
2645 | #define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1))) |
2646 | #define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2))) | |
2647 | #define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3))) | |
2648 | #define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4))) | |
2649 | #define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5))) | |
2650 | #define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6))) | |
2651 | #define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7))) | |
2652 | #define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8))) | |
cae5852d | 2653 | |
a19d6ff2 TU |
2654 | #define ENGINE_MASK(id) BIT(id) |
2655 | #define RENDER_RING ENGINE_MASK(RCS) | |
2656 | #define BSD_RING ENGINE_MASK(VCS) | |
2657 | #define BLT_RING ENGINE_MASK(BCS) | |
2658 | #define VEBOX_RING ENGINE_MASK(VECS) | |
2659 | #define BSD2_RING ENGINE_MASK(VCS2) | |
2660 | #define ALL_ENGINES (~0) | |
2661 | ||
2662 | #define HAS_ENGINE(dev_priv, id) \ | |
af1346a0 | 2663 | (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) |
a19d6ff2 TU |
2664 | |
2665 | #define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) | |
2666 | #define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) | |
2667 | #define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) | |
2668 | #define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) | |
2669 | ||
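/*
 * Worked example (illustrative): HAS_BSD2(dev_priv) expands to a test
 * of the VCS2 bit in the device's ring_mask, so it is true only on
 * parts that expose a second BSD (video) engine.
 */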
63c42e56 | 2670 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
ca377809 | 2671 | #define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) |
af1346a0 | 2672 | #define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED)) |
63c42e56 | 2673 | #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ |
3accaf7e | 2674 | HAS_EDRAM(dev)) |
cae5852d ZN |
2675 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
2676 | ||
254f965c | 2677 | #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
d7f621e5 | 2678 | #define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) |
692ef70c | 2679 | #define USES_PPGTT(dev) (i915.enable_ppgtt) |
81ba8aef MT |
2680 | #define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) |
2681 | #define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) | |
1d2a314c | 2682 | |
05394f39 | 2683 | #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
cae5852d ZN |
2684 | #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
2685 | ||
b45305fc SV |
2686 | /* Early gen2 have a totally busted CS tlb and require pinned batches. */ |
2687 | #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) | |
06e668ac MK |
2688 | |
2689 | /* WaRsDisableCoarsePowerGating:skl,bxt */ | |
61251512 TU |
2690 | #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ |
2691 | (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \ | |
2692 | IS_SKL_GT3(dev_priv) || \ | |
2693 | IS_SKL_GT4(dev_priv)) | |
185c66e5 | 2694 | |
4e6b788c SV |
2695 | /* |
2696 | * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts | |
2697 | * even when in MSI mode. This results in spurious interrupt warnings if the | |
2698 | * legacy irq no. is shared with another device. The kernel then disables that | |
2699 | * interrupt source and so prevents the other device from working properly. | |
2700 | */ | |
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DP_MST(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
				 INTEL_INFO(dev)->gen >= 9)

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
				 IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
				 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
				 IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev)		(IS_GEN6(dev) || IS_IVYBRIDGE(dev))

#define HAS_CSR(dev)	(IS_GEN9(dev))

/*
 * For now, anything with a GuC requires uCode loading, and then supports
 * command submission once loaded. But these are logically independent
 * properties, so we have separate macros to test them.
 */
#define HAS_GUC(dev)		(IS_GEN9(dev))
#define HAS_GUC_UCODE(dev)	(HAS_GUC(dev))
#define HAS_GUC_SCHED(dev)	(HAS_GUC(dev))

#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
				    INTEL_INFO(dev)->gen >= 8)

#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
				 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
				 !IS_BROXTON(dev))

#define HAS_POOLED_EU(dev)	(INTEL_INFO(dev)->has_pooled_eu)

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */

#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || \
			       IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#include "i915_trace.h"

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt);

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value);

/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...) \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
extern int i915_reset(struct drm_i915_private *dev_priv);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);

/* i915_irq.c */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
	unsigned long delay;

	if (unlikely(!i915.enable_hangcheck))
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
	queue_delayed_work(system_long_wq,
			   &dev_priv->gpu_error.hangcheck_work, delay);
}

__printf(3, 4)
void i915_handle_error(struct drm_i915_private *dev_priv,
		       u32 engine_mask,
		       const char *fmt, ...);

extern void intel_irq_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					bool restore_forcewake);
extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
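
/*
 * Illustrative sketch only (not part of the original header): the expected
 * calling pattern for the __locked variants, with the caller holding
 * uncore.lock and using the untraced I915_READ_FW accessor below.
 * GEN6_RP_STATE_CAP is used purely as an example register here.
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(GEN6_RP_STATE_CAP);
 *	intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */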
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);

int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    const u32 mask,
			    const u32 value,
			    const unsigned long timeout_ms);
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       const u32 mask,
			       const u32 value,
			       const unsigned long timeout_ms);
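
/*
 * Illustrative sketch only: polling a register until a status bit clears,
 * with a made-up register/bit pair (SOME_STATUS_REG, SOME_BUSY_BIT) for
 * the sake of the example. The helpers poll until (read(reg) & mask) ==
 * value, returning 0 on success or an error after timeout_ms milliseconds.
 *
 *	if (intel_wait_for_register(dev_priv, SOME_STATUS_REG,
 *				    SOME_BUSY_BIT, 0, 50))
 *		DRM_ERROR("device still busy after 50ms\n");
 */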

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt.initialized;
}

static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask);
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
				       enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
					enum pipe pipe, uint32_t bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
{
	ibx_display_interrupt_update(dev_priv, bits, 0);
}

/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
						   size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
		struct drm_device *dev, const void *data, size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

/* Flags used by pin/bind&friends. */
#define PIN_MAPPABLE	(1<<0)
#define PIN_NONBLOCK	(1<<1)
#define PIN_GLOBAL	(1<<2)
#define PIN_OFFSET_BIAS	(1<<3)
#define PIN_USER	(1<<4)
#define PIN_UPDATE	(1<<5)
#define PIN_ZONE_4G	(1<<6)
#define PIN_HIGH	(1<<7)
#define PIN_OFFSET_FIXED	(1<<8)
#define PIN_OFFSET_MASK (~4095)
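
/*
 * Illustrative sketch only: PIN_OFFSET_BIAS and PIN_OFFSET_FIXED carry a
 * page-aligned offset in the upper bits of the flags argument (which is
 * why PIN_OFFSET_MASK masks off the low 12 bits), so a fixed-offset GGTT
 * pin might look like the following, where "offset" is a page-aligned
 * value chosen by the caller:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal, 0,
 *				       PIN_OFFSET_FIXED | offset);
 */
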
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags);
int __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags);

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
 * BEWARE: Do not use the function below unless you can _absolutely_
 * _guarantee_ VMA in question is _not in use_ anywhere.
 */
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __sg_page_count(struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);

static inline dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
{
	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}

	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
		if (unlikely(sg_is_chain(obj->get_page.sg)))
			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
	}

	return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
}

static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
		return NULL;

	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}

	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
		if (unlikely(sg_is_chain(obj->get_page.sg)))
			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
	}

	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}
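
/*
 * Illustrative note, not from the original header: both lookups above
 * cache the last visited scatterlist entry in obj->get_page, so walking
 * pages in ascending order costs O(1) per step, while jumping to an index
 * below the cached one restarts the walk from obj->pages->sgl. A typical
 * sequential walk therefore looks like:
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 *		struct page *page = i915_gem_object_get_page(obj, i);
 *		...
 *	}
 */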

static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space.
 *
 * The caller must hold the struct_mutex, and is responsible for calling
 * i915_gem_object_unpin_map() when the mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 *
 * The caller must hold the struct_mutex.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	i915_gem_object_unpin_pages(obj);
}
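
/*
 * Illustrative sketch only: the expected pin_map/unpin_map pairing under
 * struct_mutex, copying caller-supplied data into an object's backing
 * store ("data" and "len" are assumptions of this example):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	i915_gem_object_unpin_map(obj);
 */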

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_request *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine);

void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);

static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter);
}

static inline bool __i915_reset_in_progress(u32 reset)
{
	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
{
	return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool __i915_terminally_wedged(u32 reset)
{
	return unlikely(reset & I915_WEDGED);
}

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return __i915_reset_in_progress(i915_reset_counter(error));
}

static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
	return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return __i915_terminally_wedged(i915_reset_counter(error));
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
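
/*
 * Illustrative note, not from the original header: the counter advances
 * once when a reset begins (making it odd, which is what
 * I915_RESET_IN_PROGRESS_FLAG tests) and once more when it completes, so
 * i915_reset_count() halves it to report completed resets. A caller can
 * therefore detect a reset across a wait:
 *
 *	u32 before = i915_reset_counter(&dev_priv->gpu_error);
 *	...sleep or wait...
 *	if (i915_reset_counter(&dev_priv->gpu_error) != before)
 *		...a reset happened (or is in progress) in between...
 */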

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
int __must_check i915_gem_suspend(struct drm_device *dev);
void i915_gem_resume(struct drm_device *dev);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					      const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view);
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
static inline u64
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);

struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
			  const struct i915_ggtt_view *view);

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view);

static inline struct i915_vma *
i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	return container_of(vm, struct i915_hw_ppgtt, base);
}


static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}

unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	return i915_gem_object_pin(obj, &ggtt->base,
				   alignment, flags | PIN_GLOBAL);
}

void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				     const struct i915_ggtt_view *view);
static inline void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}

/* i915_gem_fence.c */
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

void i915_gem_restore_fences(struct drm_device *dev);

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	ctx = idr_find(&file_priv->context_idr, id);
	if (!ctx)
		return ERR_PTR(-ENOENT);

	return ctx;
}

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      unsigned long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
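
/*
 * Illustrative sketch only: reclaiming up to "target" pages from both the
 * bound and unbound object lists, while leaving active objects alone:
 *
 *	freed = i915_gem_shrink(dev_priv, target,
 *				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 */
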
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);


/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					 pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)&dev_priv->info;
}

void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_connector_register(struct drm_connector *);
extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
void vlv_phy_reset_lanes(struct intel_encoder *encoder);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })
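
/*
 * Illustrative note, not from the original header: I915_READ64_2x32
 * re-reads the upper dword until it is stable, which makes it safe for
 * split 64-bit counters that may carry between the two 32-bit reads,
 * e.g. (register names chosen for illustration only):
 *
 *	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(base),
 *				  RING_TIMESTAMP_UDW(base));
 */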

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
					     i915_reg_t reg) \
{ \
	return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
				       i915_reg_t reg, uint##x##_t val) \
{ \
	write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
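
/*
 * Illustrative sketch only: enforcing a minimum delay between turning a
 * panel off (event A) and back on (event B). The "off_jiffies" field is
 * a made-up name for wherever the caller records the event-A timestamp.
 *
 *	...switch the panel off...
 *	panel->off_jiffies = jiffies;		// record event A
 *	...much later, just before switching it back on...
 *	wait_remaining_ms_from_jiffies(panel->off_jiffies, 500);
 *	...switch the panel on...
 */
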
static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
	if (i915_gem_request_completed(req))
		return true;

	/* Ensure our read of the seqno is coherent so that we
	 * do not "miss an interrupt" (i.e. if this is the last
	 * request and the seqno write from the GPU is not visible
	 * by the time the interrupt fires, we will see that the
	 * request is incomplete and go back to sleep awaiting
	 * another interrupt that will never come.)
	 *
	 * Strictly, we only need to do this once after an interrupt,
	 * but it is easier and safer to do it every time the waiter
	 * is woken.
	 */
	if (engine->irq_seqno_barrier &&
	    READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
		struct task_struct *tsk;

		/* The ordering of irq_posted versus applying the barrier
		 * is crucial. The clearing of the current irq_posted must
		 * be visible before we perform the barrier operation,
		 * such that if a subsequent interrupt arrives, irq_posted
		 * is reasserted and our task rewoken (which causes us to
		 * do another __i915_request_irq_complete() immediately
		 * and reapply the barrier). Conversely, if the clear
		 * occurs after the barrier, then an interrupt that arrived
		 * whilst we waited on the barrier would not trigger a
		 * barrier on the next pass, and the read may not see the
		 * seqno update.
		 */
		engine->irq_seqno_barrier(engine);

		/* If we consume the irq, but we are no longer the bottom-half,
		 * the real bottom-half may not have serialised their own
		 * seqno check with the irq-barrier (i.e. may have inspected
		 * the seqno before we believe it coherent since they see
		 * irq_posted == false but we are still running).
		 */
		rcu_read_lock();
		tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
		if (tsk && tsk != current)
			/* Note that if the bottom-half is changed as we
			 * are sending the wake-up, the new bottom-half will
			 * be woken by whomever made the change. We only have
			 * to worry about when we steal the irq-posted for
			 * ourself.
			 */
			wake_up_process(tsk);
		rcu_read_unlock();

		if (i915_gem_request_completed(req))
			return true;
	}

	/* We need to check whether any gpu reset happened in between
	 * the request being submitted and now. If a reset has occurred,
	 * the seqno will have been advanced past ours and our request
	 * is complete. If we are in the process of handling a reset,
	 * the request is effectively complete as the rendering will
	 * be discarded, but we need to return in order to drop the
	 * struct_mutex.
	 */
	if (i915_reset_in_progress(&req->i915->gpu_error))
		return true;

	return false;
}

#endif