/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>

#include "i915_scheduler.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_i915_gem_object;

struct intel_wait {
	struct task_struct *tsk;
	struct i915_request *request;
};

struct intel_signal_node {
	struct intel_wait wait;
	struct list_head link;
};

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context;
	struct intel_ring *ring;
	struct i915_timeline *timeline;
	struct intel_signal_node signaling;

	/*
	 * The RCU epoch at which this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure
	 * that under mempressure there are sufficient RCU ticks for us to
	 * reclaim our RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	wait_queue_entry_t submitq;
	wait_queue_head_t execute;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;

	/**
	 * GEM sequence number associated with this request on the
	 * global execution timeline. It is zero when the request is not
	 * on the HW queue (i.e. not on the engine timeline list).
	 * Its value is guarded by the timeline spinlock.
	 */
	u32 global_seqno;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocate space in the ring for emitting the request */
	u32 reserved_space;

	/** Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
	struct list_head active_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** engine->request_list entry for this request */
	struct list_head link;

	/** ring->request_list entry for this request */
	struct list_head ring_link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
i915_request_alloc(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

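/*
 * Reference handling sketch (illustrative only, not part of the interface):
 * every i915_request_get() or i915_request_get_rcu() must be balanced by an
 * i915_request_put() once the caller is done with the request, e.g.
 *
 *	struct i915_request *rq = i915_request_get(submitted_rq);
 *
 *	... use rq without fear of it being freed underneath us ...
 *
 *	i915_request_put(rq);
 *
 * "submitted_rq" above is a hypothetical pointer obtained elsewhere, for
 * example from i915_request_alloc() or from an active tracker below.
 */
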
235 * i915_request_global_seqno - report the current global seqno
236 * @request - the request
238 * A request is assigned a global seqno only when it is on the hardware
239 * execution queue. The global seqno can be used to maintain a list of
240 * requests on the same engine in retirement order, for example for
241 * constructing a priority queue for waiting. Prior to its execution, or
242 * if it is subsequently removed in the event of preemption, its global
243 * seqno is zero. As both insertion and removal from the execution queue
244 * may operate in IRQ context, it is not guarded by the usual struct_mutex
245 * BKL. Instead those relying on the global seqno must be prepared for its
246 * value to change between reads. Only when the request is complete can
247 * the global seqno be stable (due to the memory barriers on submitting
248 * the commands to the hardware to write the breadcrumb, if the HWS shows
249 * that it has passed the global seqno and the global seqno is unchanged
250 * after the read, it is indeed complete).
253 i915_request_global_seqno(const struct i915_request *request)
255 return READ_ONCE(request->global_seqno);
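/*
 * A sketch of the "read, check hardware, re-read" pattern described above,
 * mirroring what __i915_request_completed() does later in this header:
 *
 *	u32 seqno = i915_request_global_seqno(rq);
 *
 *	if (seqno &&
 *	    intel_engine_has_completed(rq->engine, seqno) &&
 *	    seqno == i915_request_global_seqno(rq))
 *		... the request is definitely complete ...
 *
 * Because the global seqno may change between reads (preemption), only the
 * unchanged value across both reads, combined with the HWS check, is stable.
 */
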
int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

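/*
 * Typical request construction, shown only as a rough sketch of how the
 * declarations above fit together; the engine, context and incoming fence
 * are hypothetical and most error handling is elided:
 *
 *	struct i915_request *rq;
 *	int err;
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *
 *	... emit commands into the request's ring ...
 *
 *	i915_request_add(rq);
 */
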
void __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_LOCKED	BIT(1) /* struct_mutex held, handle GPU reset */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(3)

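/*
 * Example wait (illustrative): block interruptibly until the request is
 * complete, with no upper bound on the wait. A negative return is an error,
 * otherwise the wait succeeded.
 *
 *	long ret = i915_request_wait(rq,
 *				     I915_WAIT_INTERRUPTIBLE,
 *				     MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */
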
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
					    u32 seqno);
static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
					    u32 seqno);

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

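/*
 * The unsigned subtraction cast to s32 makes the comparison safe across
 * seqno wraparound. A few worked examples (illustrative values only):
 *
 *	i915_seqno_passed(2, 1)                   == true
 *	i915_seqno_passed(1, 1)                   == true
 *	i915_seqno_passed(0x00000001, 0xffffffff) == true   (wrapped)
 *	i915_seqno_passed(0xffffffff, 0x00000001) == false
 */
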
/**
 * i915_request_started - check if the request has begun being executed
 *
 * Returns true if the request has been submitted to hardware, and the hardware
 * has advanced past the end of the previous request and so should be either
 * currently processing the request (though it may be preempted and so
 * not necessarily the next request to complete) or have completed the request.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno) /* not yet submitted to HW */
		return false;

	return intel_engine_has_started(rq->engine, seqno);
}

static inline bool
__i915_request_completed(const struct i915_request *rq, u32 seqno)
{
	return intel_engine_has_completed(rq->engine, seqno) &&
	       seqno == i915_request_global_seqno(rq);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	u32 seqno;

	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;

	return __i915_request_completed(rq, seqno);
}

static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
{
	const struct i915_request *rq =
		container_of(node, const struct i915_request, sched);

	return i915_request_completed(rq);
}

void i915_retire_requests(struct drm_i915_private *i915);

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
 * We use the fences to synchronize access from the CPU with activity on the
 * GPU, for example, we should not rewrite an object's PTE whilst the GPU
 * is reading them. We also track fences at a higher level to provide
 * implicit synchronisation around GEM objects, e.g. set-domain will wait
 * for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_gem_active to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_gem_active is updated with i915_gem_active_set() to track the
 * most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_gem_active completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_gem_active.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */

struct i915_gem_active;

typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
				   struct i915_request *);

struct i915_gem_active {
	struct i915_request __rcu *request;
	struct list_head link;
	i915_gem_retire_fn retire;
};

void i915_gem_retire_noop(struct i915_gem_active *,
			  struct i915_request *request);

/**
 * init_request_active - prepares the activity tracker for use
 * @active - the active tracker
 * @func - a callback invoked when the tracker is retired (becomes idle),
 *         can be NULL
 *
 * init_request_active() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active request
 * associated with it. When the last request becomes idle, when it is retired
 * after completion, the optional callback @func is invoked.
 */
static inline void
init_request_active(struct i915_gem_active *active,
		    i915_gem_retire_fn retire)
{
	RCU_INIT_POINTER(active->request, NULL);
	INIT_LIST_HEAD(&active->link);
	active->retire = retire ?: i915_gem_retire_noop;
}

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct i915_request *request)
{
	list_move(&active->link, &request->active_list);
	rcu_assign_pointer(active->request, request);
}

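/*
 * Illustrative use of the activity tracker; the object, retirement callback
 * and field names are hypothetical, and updates are assumed to happen under
 * the usual struct_mutex serialisation:
 *
 *	struct my_object {
 *		struct i915_gem_active last_write;
 *	};
 *
 *	init_request_active(&obj->last_write, my_object_retire);
 *
 *	... for each new request writing to the object ...
 *	i915_gem_active_set(&obj->last_write, rq);
 *
 * When rq is retired, my_object_retire() is invoked and the tracker reports
 * idle again.
 */
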
/**
 * i915_gem_active_set_retire_fn - updates the retirement callback
 * @active - the active tracker
 * @fn - the routine called when the request is retired
 * @mutex - struct_mutex used to guard retirements
 *
 * i915_gem_active_set_retire_fn() updates the function pointer that
 * is called when the final request associated with the @active tracker
 * is retired.
 */
static inline void
i915_gem_active_set_retire_fn(struct i915_gem_active *active,
			      i915_gem_retire_fn fn,
			      struct mutex *mutex)
{
	lockdep_assert_held(mutex);
	active->retire = fn ?: i915_gem_retire_noop;
}

static inline struct i915_request *
__i915_gem_active_peek(const struct i915_gem_active *active)
{
	/*
	 * Inside the error capture (running with the driver in an unknown
	 * state), we want to bend the rules slightly (a lot).
	 *
	 * Work is in progress to make it safer, in the meantime this keeps
	 * the known issue from spamming the logs.
	 */
	return rcu_dereference_protected(active->request, 1);
}

/**
 * i915_gem_active_raw - return the active request
 * @active - the active tracker
 *
 * i915_gem_active_raw() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the caller
 * must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_raw(const struct i915_gem_active *active, struct mutex *mutex)
{
	return rcu_dereference_protected(active->request,
					 lockdep_is_held(mutex));
}

/**
 * i915_gem_active_peek - report the active request being monitored
 * @active - the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked if
 * still active, or NULL. It does not obtain a reference on the request
 * for the caller, so the caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
{
	struct i915_request *request;

	request = i915_gem_active_raw(active, mutex);
	if (!request || i915_request_completed(request))
		return NULL;

	return request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get() returns a reference to the active request, or NULL
 * if the active tracker is idle. The caller must hold struct_mutex.
 */
static inline struct i915_request *
i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
{
	return i915_request_get(i915_gem_active_peek(active, mutex));
}

/**
 * __i915_gem_active_get_rcu - return a reference to the active request
 * @active - the active tracker
 *
 * __i915_gem_active_get_rcu() returns a reference to the active request, or
 * NULL if the active tracker is idle. The caller must hold the RCU read lock,
 * but the returned pointer is safe to use outside of RCU.
 */
static inline struct i915_request *
__i915_gem_active_get_rcu(const struct i915_gem_active *active)
{
	/*
	 * Performing a lockless retrieval of the active request is super
	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
	 * slab of request objects will not be freed whilst we hold the
	 * RCU read lock. It does not guarantee that the request itself
	 * will not be freed and then *reused*. Viz,
	 *
	 *	Thread A			Thread B
	 *
	 *	rq = active.request
	 *					retire(rq) -> free(rq);
	 *					(rq is now first on the slab freelist)
	 *					active.request = NULL
	 *
	 *	rq = new submission on a new object
	 *	ref(rq)
	 *
	 * To prevent the request from being reused whilst the caller
	 * uses it, we take a reference like normal. Whilst acquiring
	 * the reference we check that it is not in a destroyed state
	 * (refcnt == 0). That prevents the request being reallocated
	 * whilst the caller holds on to it. To check that the request
	 * was not reallocated as we acquired the reference we have to
	 * check that our request remains the active request across
	 * the lookup, in the same manner as a seqlock. The visibility
	 * of the pointer versus the reference counting is controlled
	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
	 *
	 * In the middle of all that, we inspect whether the request is
	 * complete. Retiring is lazy so the request may be completed long
	 * before the active tracker is updated. Querying whether the
	 * request is complete is far cheaper (as it involves no locked
	 * instructions setting cachelines to exclusive) than acquiring
	 * the reference, so we do it first. The RCU read lock ensures the
	 * pointer dereference is valid, but does not ensure that the
	 * seqno nor HWS is the right one! However, if the request was
	 * reallocated, that means the active tracker's request was complete.
	 * If the new request is also complete, then both are and we can
	 * just report the active tracker is idle. If the new request is
	 * incomplete, then we acquire a reference on it and check that
	 * it remained the active request.
	 *
	 * It is then imperative that we do not zero the request on
	 * reallocation, so that we can chase the dangling pointers!
	 * See i915_request_alloc().
	 */
	do {
		struct i915_request *request;

		request = rcu_dereference(active->request);
		if (!request || i915_request_completed(request))
			return NULL;

		/*
		 * An especially silly compiler could decide to recompute the
		 * result of i915_request_completed, more specifically
		 * re-emit the load for request->fence.seqno. A race would catch
		 * a later seqno value, which could flip the result from true to
		 * false. Which means part of the instructions below might not
		 * be executed, while later on instructions are executed. Due to
		 * barriers within the refcounting the inconsistency can't reach
		 * past the call to i915_request_get_rcu, but not executing
		 * that while still executing i915_request_put() creates
		 * havoc enough. Prevent this with a compiler barrier.
		 */
		barrier();

		request = i915_request_get_rcu(request);

		/*
		 * What stops the following rcu_access_pointer() from occurring
		 * before the above i915_request_get_rcu()? If we were
		 * to read the value before pausing to get the reference to
		 * the request, we may not notice a change in the active
		 * tracker.
		 *
		 * The rcu_access_pointer() is a mere compiler barrier, which
		 * means both the CPU and compiler are free to perform the
		 * memory read without constraint. The compiler only has to
		 * ensure that any operations after the rcu_access_pointer()
		 * occur afterwards in program order. This means the read may
		 * be performed earlier by an out-of-order CPU, or adventurous
		 * compiler.
		 *
		 * The atomic operation at the heart of
		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
		 * atomic_inc_not_zero() which is only a full memory barrier
		 * when successful. That is, if i915_request_get_rcu()
		 * returns the request (and so with the reference counted
		 * incremented) then the following read for rcu_access_pointer()
		 * must occur after the atomic operation and so confirm
		 * that this request is the one currently being tracked.
		 *
		 * The corresponding write barrier is part of
		 * rcu_assign_pointer().
		 */
		if (!request || request == rcu_access_pointer(active->request))
			return rcu_pointer_handoff(request);

		i915_request_put(request);
	} while (1);
}

/**
 * i915_gem_active_get_unlocked - return a reference to the active request
 * @active - the active tracker
 *
 * i915_gem_active_get_unlocked() returns a reference to the active request,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with i915_request_put().
 */
static inline struct i915_request *
i915_gem_active_get_unlocked(const struct i915_gem_active *active)
{
	struct i915_request *request;

	rcu_read_lock();
	request = __i915_gem_active_get_rcu(active);
	rcu_read_unlock();

	return request;
}

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return rcu_access_pointer(active->request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active - the active request on which to wait
 * @flags - how to wait
 *
 * i915_gem_active_wait() waits until the request is completed before
 * returning, without requiring any locks to be held. Note that it does not
 * retire any requests before returning.
 *
 * This function relies on RCU in order to acquire the reference to the active
 * request without holding any locks. See __i915_gem_active_get_rcu() for the
 * gory details on how that is managed. Once the reference is acquired, we
 * can then wait upon the request, and afterwards release our reference,
 * free of any locking.
 *
 * This function wraps i915_request_wait(), see it for the full details on
 * the arguments.
 *
 * Returns 0 if successful, or a negative error code.
 */
static inline int
i915_gem_active_wait(const struct i915_gem_active *active, unsigned int flags)
{
	struct i915_request *request;
	long ret = 0;

	request = i915_gem_active_get_unlocked(active);
	if (request) {
		ret = i915_request_wait(request, flags, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(request);
	}

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active - the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(struct i915_gem_active *active,
		       struct mutex *mutex)
{
	struct i915_request *request;
	long ret;

	request = i915_gem_active_raw(active, mutex);
	if (!request)
		return 0;

	ret = i915_request_wait(request,
				I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	list_del_init(&active->link);
	RCU_INIT_POINTER(active->request, NULL);

	active->retire(active, request);
	return 0;
}

#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))

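/*
 * for_each_active() iterates @idx over the indices of the bits set in @mask,
 * lowest first, clearing each bit from @mask as it is visited (the mask is
 * consumed). A hypothetical caller, walking one tracker per engine:
 *
 *	unsigned int mask = obj->active_engines;
 *	int idx;
 *
 *	for_each_active(mask, idx)
 *		process_engine_tracker(obj, idx);
 *
 * Both "active_engines" and "process_engine_tracker" above are illustrative
 * names, not part of this interface.
 */
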
#endif /* I915_REQUEST_H */