/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2018 Intel Corporation
 */
#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>

#include <drm/drm_mm.h>

#include "intel_device_info.h"
#include "intel_ringbuffer.h"
#include "intel_uc_fw.h"

#include "i915_gem_gtt.h"
#include "i915_params.h"
#include "i915_scheduler.h"

struct drm_i915_private;
struct intel_overlay_error_state;
struct intel_display_error_state;
struct i915_gpu_state {
	struct kref ref;

	unsigned long capture;

	struct drm_i915_private *i915;

	struct intel_device_info device_info;
	struct intel_driver_caps driver_caps;
	struct i915_params params;
	struct i915_error_uc {
		struct intel_uc_fw guc_fw;
		struct intel_uc_fw huc_fw;
		struct drm_i915_error_object *guc_log;
	} uc;
	/* Generic register state */
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */

	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_engine {
		/* Software tracked state */
		unsigned long hangcheck_timestamp;
		bool hangcheck_stalled;
		enum intel_engine_hangcheck_action hangcheck_action;
		struct i915_address_space *vm;
		/* position of active request inside the ring */
		u32 rq_head, rq_post, rq_tail;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
		struct intel_instdone instdone;
		struct drm_i915_error_context {
			char comm[TASK_COMM_LEN];
			struct i915_sched_attr sched_attr;
		} context;
		struct drm_i915_error_object {
			u64 gtt_offset;
			u64 gtt_size;
			int page_count;
			int unused;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
		struct drm_i915_error_object **user_bo;
		long user_bo_count;

		struct drm_i915_error_object *wa_ctx;
		struct drm_i915_error_object *default_state;
		struct drm_i915_error_request {
			struct i915_sched_attr sched_attr;
		} *requests, execlist[EXECLIST_MAX_PORTS];
		unsigned int num_ports;
		struct drm_i915_error_waiter {
			char comm[TASK_COMM_LEN];
		} *waiters;
	} engine[I915_NUM_ENGINES];
	struct drm_i915_error_buffer {
		u32 rseqno[I915_NUM_ENGINES], wseqno;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
	} *active_bo[I915_NUM_ENGINES], *pinned_bo;
	u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
	struct i915_address_space *active_vm[I915_NUM_ENGINES];
};
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)

	struct delayed_work hangcheck_work;
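	/*
	 * Illustrative sketch (assumption, not part of this header): the
	 * hangcheck worker is re-armed with the period defined above, e.g.
	 *
	 *	schedule_delayed_work(&i915->gpu_error.hangcheck_work,
	 *			      DRM_I915_HANGCHECK_JIFFIES);
	 *
	 * The driver may instead batch wakeups by rounding the delay with
	 * round_jiffies_up_relative(); see the hangcheck code for the
	 * authoritative call site.
	 */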
	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct i915_gpu_state *first_error;

	atomic_t pending_fb_pin;

	unsigned long missed_irq_rings;
	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when a reset is triggered.
	 *
	 * Before the reset commences, the I915_RESET_BACKOFF bit is set,
	 * meaning that any waiters holding onto the struct_mutex should
	 * relinquish the lock immediately in order for the reset to start.
	 *
	 * If the reset is not completed successfully, the I915_WEDGED bit is
	 * set, meaning that the hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that a reset
	 * event happened and that it needs to restart the entire ioctl (since
	 * most likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	unsigned long reset_count;
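	/*
	 * Illustrative sketch (assumption, no such helper is declared here;
	 * error points at this struct): a lock-free waiter samples the
	 * counter before sleeping and restarts its ioctl if it changed:
	 *
	 *	unsigned long reset = READ_ONCE(error->reset_count);
	 *	...wait for the seqno to signal...
	 *	if (READ_ONCE(error->reset_count) != reset)
	 *		return -EAGAIN;
	 */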
	/**
	 * flags: Control various stages of the GPU reset
	 *
	 * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
	 * other users acquiring the struct_mutex. To do this we set the
	 * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
	 * and then check for that bit before acquiring the struct_mutex (in
	 * i915_mutex_lock_interruptible()). I915_RESET_BACKOFF serves a
	 * secondary role in preventing two concurrent global reset attempts.
	 *
	 * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
	 * struct_mutex. We try to acquire the struct_mutex in the reset worker,
	 * but it may be held by some long-running waiter (that we cannot
	 * interrupt without causing trouble). Once we are ready to do the GPU
	 * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
	 * they already hold the struct_mutex and want to participate, they can
	 * inspect the bit and do the reset directly; otherwise the worker
	 * waits for the struct_mutex.
	 *
	 * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
	 * acquire the struct_mutex to reset an engine, we need an explicit
	 * flag to prevent two concurrent reset attempts on the same engine.
	 * As the number of engines continues to grow, we allocate the flags
	 * from the most significant bits.
	 *
	 * #I915_WEDGED - If a reset fails and we can no longer use the GPU,
	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
	 * i915_request_alloc(), this bit is checked and the sequence
	 * aborted (with -EIO reported to userspace) if set.
	 */
	unsigned long flags;
#define I915_RESET_BACKOFF	0
#define I915_RESET_HANDOFF	1
#define I915_RESET_MODESET	2
#define I915_WEDGED		(BITS_PER_LONG - 1)
#define I915_RESET_ENGINE	(I915_WEDGED - I915_NUM_ENGINES)
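	/*
	 * Illustrative sketch (assumption): the bits above are tested on the
	 * flags word with the standard atomic bitops. The first check aborts
	 * submission on a terminally wedged GPU; the second claims the
	 * per-engine reset bit and backs off if a reset is already in flight:
	 *
	 *	if (test_bit(I915_WEDGED, &i915->gpu_error.flags))
	 *		return -EIO;
	 *
	 *	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
	 *			     &i915->gpu_error.flags))
	 *		return;
	 */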
	/** Number of times an engine has been reset */
	u32 reset_engine_count[I915_NUM_ENGINES];

	/** Set of stalled engines with guilty requests, in the current reset */
	u32 stalled_mask;

	/** Reason for the current *global* reset */
	const char *reason;

	/**
	 * Waitqueue to signal when a hang is detected. Used for waiters
	 * to release the struct_mutex for the reset to proceed.
	 */
	wait_queue_head_t wait_queue;

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For missed irq/seqno simulation. */
	unsigned long test_irq_rings;
};
struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned int bytes;
	unsigned int size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void
i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
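/*
 * Illustrative sketch (assumption; error handling elided): dumping a captured
 * snapshot into a buffer, e.g. for a debugfs read:
 *
 *	struct drm_i915_error_state_buf buf;
 *
 *	if (i915_error_state_buf_init(&buf, i915, count, pos) == 0) {
 *		i915_error_state_to_str(&buf, gpu);
 *		...copy the formatted contents out to the reader...
 *		i915_error_state_buf_release(&buf);
 *	}
 */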
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
			      u32 engine_mask,
			      const char *error_msg);
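/*
 * Illustrative sketch (assumption): a hang handler snapshots the offending
 * engines and stashes the error state in one call:
 *
 *	i915_capture_error_state(dev_priv, BIT(engine->id), "GPU hang");
 */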
static inline struct i915_gpu_state *
i915_gpu_state_get(struct i915_gpu_state *gpu)
{
	kref_get(&gpu->ref);
	return gpu;
}

void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
	if (gpu)
		kref_put(&gpu->ref, __i915_gpu_state_free);
}
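/*
 * Illustrative sketch (assumption): i915_first_error_state() below returns
 * an already-referenced snapshot (or NULL), which the caller must balance
 * with i915_gpu_state_put():
 *
 *	struct i915_gpu_state *gpu;
 *
 *	gpu = i915_first_error_state(i915);
 *	if (gpu) {
 *		...inspect or print the snapshot...
 *		i915_gpu_state_put(gpu);
 *	}
 */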
struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);

#else

static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
					    u32 engine_mask,
					    const char *error_msg)
{
}

static inline struct i915_gpu_state *
i915_first_error_state(struct drm_i915_private *i915)
{
	return NULL;
}

static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}

#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */

#endif /* _I915_GPU_ERROR_H_ */