drivers/gpu/drm/i915/i915_request.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/prefetch.h>
26 #include <linux/dma-fence-array.h>
27 #include <linux/sched.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/signal.h>
30
31 #include "i915_drv.h"
32
33 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
34 {
35         return "i915";
36 }
37
38 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
39 {
40         /*
41          * The timeline struct (as part of the ppgtt underneath a context)
42          * may be freed when the request is no longer in use by the GPU.
43          * We could extend the life of a context to beyond that of all
44          * fences, possibly keeping the hw resource around indefinitely,
45          * or we just give them a false name. Since
46          * dma_fence_ops.get_timeline_name is a debug feature, the occasional
47          * lie seems justifiable.
48          */
49         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
50                 return "signaled";
51
52         return to_request(fence)->timeline->name;
53 }
54
55 static bool i915_fence_signaled(struct dma_fence *fence)
56 {
57         return i915_request_completed(to_request(fence));
58 }
59
60 static bool i915_fence_enable_signaling(struct dma_fence *fence)
61 {
62         return intel_engine_enable_signaling(to_request(fence), true);
63 }
64
65 static signed long i915_fence_wait(struct dma_fence *fence,
66                                    bool interruptible,
67                                    signed long timeout)
68 {
69         return i915_request_wait(to_request(fence), interruptible, timeout);
70 }
71
72 static void i915_fence_release(struct dma_fence *fence)
73 {
74         struct i915_request *rq = to_request(fence);
75
76         /*
77          * The request is put onto a RCU freelist (i.e. the address
78          * is immediately reused), mark the fences as being freed now.
79          * Otherwise the debugobjects for the fences are only marked as
80          * freed when the slab cache itself is freed, and so we would get
81          * caught trying to reuse dead objects.
82          */
83         i915_sw_fence_fini(&rq->submit);
84
85         kmem_cache_free(rq->i915->requests, rq);
86 }
87
88 const struct dma_fence_ops i915_fence_ops = {
89         .get_driver_name = i915_fence_get_driver_name,
90         .get_timeline_name = i915_fence_get_timeline_name,
91         .enable_signaling = i915_fence_enable_signaling,
92         .signaled = i915_fence_signaled,
93         .wait = i915_fence_wait,
94         .release = i915_fence_release,
95 };
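
/*
 * These ops back the dma_fence embedded in every i915_request (see the
 * dma_fence_init() call in i915_request_alloc() below), so generic
 * dma-fence users end up calling back into the request code:
 * dma_fence_wait() lands in i915_fence_wait() and dma_fence_is_signaled()
 * in i915_fence_signaled(). Illustrative sketch of an external waiter
 * that already holds a reference to the fence:
 *
 *	long ret = dma_fence_wait_timeout(&rq->fence, true, HZ);
 *	if (ret <= 0)
 *		return ret ? ret : -ETIME;
 */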
96
97 static inline void
98 i915_request_remove_from_client(struct i915_request *request)
99 {
100         struct drm_i915_file_private *file_priv;
101
102         file_priv = request->file_priv;
103         if (!file_priv)
104                 return;
105
106         spin_lock(&file_priv->mm.lock);
107         if (request->file_priv) {
108                 list_del(&request->client_link);
109                 request->file_priv = NULL;
110         }
111         spin_unlock(&file_priv->mm.lock);
112 }
113
114 static struct i915_dependency *
115 i915_dependency_alloc(struct drm_i915_private *i915)
116 {
117         return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
118 }
119
120 static void
121 i915_dependency_free(struct drm_i915_private *i915,
122                      struct i915_dependency *dep)
123 {
124         kmem_cache_free(i915->dependencies, dep);
125 }
126
127 static void
128 __i915_sched_node_add_dependency(struct i915_sched_node *node,
129                                  struct i915_sched_node *signal,
130                                  struct i915_dependency *dep,
131                                  unsigned long flags)
132 {
133         INIT_LIST_HEAD(&dep->dfs_link);
134         list_add(&dep->wait_link, &signal->waiters_list);
135         list_add(&dep->signal_link, &node->signalers_list);
136         dep->signaler = signal;
137         dep->flags = flags;
138 }
139
140 static int
141 i915_sched_node_add_dependency(struct drm_i915_private *i915,
142                                struct i915_sched_node *node,
143                                struct i915_sched_node *signal)
144 {
145         struct i915_dependency *dep;
146
147         dep = i915_dependency_alloc(i915);
148         if (!dep)
149                 return -ENOMEM;
150
151         __i915_sched_node_add_dependency(node, signal, dep,
152                                          I915_DEPENDENCY_ALLOC);
153         return 0;
154 }
155
156 static void
157 i915_sched_node_fini(struct drm_i915_private *i915,
158                      struct i915_sched_node *node)
159 {
160         struct i915_dependency *dep, *tmp;
161
162         GEM_BUG_ON(!list_empty(&node->link));
163
164         /*
165          * Everyone we depended upon (the fences we wait to be signaled)
166          * should retire before us and remove themselves from our list.
167          * However, retirement is run independently on each timeline and
168          * so we may be called out-of-order.
169          */
170         list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
171                 GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
172                 GEM_BUG_ON(!list_empty(&dep->dfs_link));
173
174                 list_del(&dep->wait_link);
175                 if (dep->flags & I915_DEPENDENCY_ALLOC)
176                         i915_dependency_free(i915, dep);
177         }
178
179         /* Remove ourselves from everyone who depends upon us */
180         list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
181                 GEM_BUG_ON(dep->signaler != node);
182                 GEM_BUG_ON(!list_empty(&dep->dfs_link));
183
184                 list_del(&dep->signal_link);
185                 if (dep->flags & I915_DEPENDENCY_ALLOC)
186                         i915_dependency_free(i915, dep);
187         }
188 }
189
190 static void
191 i915_sched_node_init(struct i915_sched_node *node)
192 {
193         INIT_LIST_HEAD(&node->signalers_list);
194         INIT_LIST_HEAD(&node->waiters_list);
195         INIT_LIST_HEAD(&node->link);
196         node->attr.priority = I915_PRIORITY_INVALID;
197 }
198
199 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
200 {
201         struct intel_engine_cs *engine;
202         struct i915_timeline *timeline;
203         enum intel_engine_id id;
204         int ret;
205
206         /* Carefully retire all requests without writing to the rings */
207         ret = i915_gem_wait_for_idle(i915,
208                                      I915_WAIT_INTERRUPTIBLE |
209                                      I915_WAIT_LOCKED,
210                                      MAX_SCHEDULE_TIMEOUT);
211         if (ret)
212                 return ret;
213
214         GEM_BUG_ON(i915->gt.active_requests);
215
216         /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
217         for_each_engine(engine, i915, id) {
218                 GEM_TRACE("%s seqno %d (current %d) -> %d\n",
219                           engine->name,
220                           engine->timeline.seqno,
221                           intel_engine_get_seqno(engine),
222                           seqno);
223
224                 if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
225                         /* Flush any waiters before we reuse the seqno */
226                         intel_engine_disarm_breadcrumbs(engine);
227                         intel_engine_init_hangcheck(engine);
228                         GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
229                 }
230
231                 /* Check we are idle before we fiddle with hw state! */
232                 GEM_BUG_ON(!intel_engine_is_idle(engine));
233                 GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
234
235                 /* Finally reset hw state */
236                 intel_engine_init_global_seqno(engine, seqno);
237                 engine->timeline.seqno = seqno;
238         }
239
240         list_for_each_entry(timeline, &i915->gt.timelines, link)
241                 memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
242
243         i915->gt.request_serial = seqno;
244
245         return 0;
246 }
247
248 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
249 {
250         struct drm_i915_private *i915 = to_i915(dev);
251
252         lockdep_assert_held(&i915->drm.struct_mutex);
253
254         if (seqno == 0)
255                 return -EINVAL;
256
257         /* The HWS page must be set to one less than the seqno we will inject into the ring */
258         return reset_all_global_seqno(i915, seqno - 1);
259 }
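
/*
 * Worked example: asking to restart at seqno 0x1000 (e.g. via the
 * i915_next_seqno debugfs file) calls reset_all_global_seqno(i915, 0xfff),
 * so the HWS pages report 0xfff and the breadcrumb emitted for the very
 * next request advances them to exactly 0x1000.
 */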
260
261 static int reserve_gt(struct drm_i915_private *i915)
262 {
263         int ret;
264
265         /*
266          * Reservation is fine until we may need to wrap around
267          *
268          * By incrementing the serial for every request, we know that no
269          * individual engine may exceed that serial (as each is reset to 0
270          * on any wrap). This protects even the most pessimistic of migrations
271          * of every request from all engines onto just one.
272          */
273         while (unlikely(++i915->gt.request_serial == 0)) {
274                 ret = reset_all_global_seqno(i915, 0);
275                 if (ret) {
276                         i915->gt.request_serial--;
277                         return ret;
278                 }
279         }
280
281         if (!i915->gt.active_requests++)
282                 i915_gem_unpark(i915);
283
284         return 0;
285 }
286
287 static void unreserve_gt(struct drm_i915_private *i915)
288 {
289         GEM_BUG_ON(!i915->gt.active_requests);
290         if (!--i915->gt.active_requests)
291                 i915_gem_park(i915);
292 }
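
/*
 * reserve_gt() and unreserve_gt() must remain balanced: every request
 * counted in i915_request_alloc() is uncounted either in
 * i915_request_retire() or on the allocation error path, and the first
 * and last active request of each cycle unpark/park the GT respectively.
 */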
293
294 void i915_gem_retire_noop(struct i915_gem_active *active,
295                           struct i915_request *request)
296 {
297         /* Space left intentionally blank */
298 }
299
300 static void advance_ring(struct i915_request *request)
301 {
302         struct intel_ring *ring = request->ring;
303         unsigned int tail;
304
305         /*
306          * We know the GPU must have read the request to have
307          * sent us the seqno + interrupt, so use the position
308          * of the tail of the request to update the last known position
309          * of the GPU head.
310          *
311          * Note this requires that we are always called in request
312          * completion order.
313          */
314         GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
315         if (list_is_last(&request->ring_link, &ring->request_list)) {
316                 /*
317                  * We may race here with execlists resubmitting this request
318                  * as we retire it. The resubmission will move the ring->tail
319                  * forwards (to request->wa_tail). We either read the
320                  * current value that was written to hw, or the value that
321                  * is just about to be. Either works, if we miss the last two
322                  * noops - they are safe to be replayed on a reset.
323                  */
324                 GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
325                 tail = READ_ONCE(request->tail);
326                 list_del(&ring->active_link);
327         } else {
328                 tail = request->postfix;
329         }
330         list_del_init(&request->ring_link);
331
332         ring->head = tail;
333 }
334
335 static void free_capture_list(struct i915_request *request)
336 {
337         struct i915_capture_list *capture;
338
339         capture = request->capture_list;
340         while (capture) {
341                 struct i915_capture_list *next = capture->next;
342
343                 kfree(capture);
344                 capture = next;
345         }
346 }
347
348 static void __retire_engine_request(struct intel_engine_cs *engine,
349                                     struct i915_request *rq)
350 {
351         GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
352                   __func__, engine->name,
353                   rq->fence.context, rq->fence.seqno,
354                   rq->global_seqno,
355                   intel_engine_get_seqno(engine));
356
357         GEM_BUG_ON(!i915_request_completed(rq));
358
359         local_irq_disable();
360
361         spin_lock(&engine->timeline.lock);
362         GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
363         list_del_init(&rq->link);
364         spin_unlock(&engine->timeline.lock);
365
366         spin_lock(&rq->lock);
367         if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
368                 dma_fence_signal_locked(&rq->fence);
369         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
370                 intel_engine_cancel_signaling(rq);
371         if (rq->waitboost) {
372                 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
373                 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
374         }
375         spin_unlock(&rq->lock);
376
377         local_irq_enable();
378
379         /*
380          * The backing object for the context is done after switching to the
381          * *next* context. Therefore we cannot retire the previous context until
382          * the next context has already started running. However, since we
383          * cannot take the required locks at i915_request_submit() we
384          * defer the unpinning of the active context to now, retirement of
385          * the subsequent request.
386          */
387         if (engine->last_retired_context)
388                 intel_context_unpin(engine->last_retired_context);
389         engine->last_retired_context = rq->hw_context;
390 }
391
392 static void __retire_engine_upto(struct intel_engine_cs *engine,
393                                  struct i915_request *rq)
394 {
395         struct i915_request *tmp;
396
397         if (list_empty(&rq->link))
398                 return;
399
400         do {
401                 tmp = list_first_entry(&engine->timeline.requests,
402                                        typeof(*tmp), link);
403
404                 GEM_BUG_ON(tmp->engine != engine);
405                 __retire_engine_request(engine, tmp);
406         } while (tmp != rq);
407 }
408
409 static void i915_request_retire(struct i915_request *request)
410 {
411         struct i915_gem_active *active, *next;
412
413         GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
414                   request->engine->name,
415                   request->fence.context, request->fence.seqno,
416                   request->global_seqno,
417                   intel_engine_get_seqno(request->engine));
418
419         lockdep_assert_held(&request->i915->drm.struct_mutex);
420         GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
421         GEM_BUG_ON(!i915_request_completed(request));
422
423         trace_i915_request_retire(request);
424
425         advance_ring(request);
426         free_capture_list(request);
427
428         /*
429          * Walk through the active list, calling retire on each. This allows
430          * objects to track their GPU activity and mark themselves as idle
431          * when their *last* active request is completed (updating state
432          * tracking lists for eviction, active references for GEM, etc).
433          *
434          * As the ->retire() may free the node, we decouple it first and
435          * pass along the auxiliary information (to avoid dereferencing
436          * the node after the callback).
437          */
438         list_for_each_entry_safe(active, next, &request->active_list, link) {
439                 /*
440                  * In microbenchmarks or focusing upon time inside the kernel,
441                  * we may spend an inordinate amount of time simply handling
442                  * the retirement of requests and processing their callbacks.
443                  * Of which, this loop itself is particularly hot due to the
444                  * cache misses when jumping around the list of i915_gem_active.
445                  * So we try to keep this loop as streamlined as possible and
446                  * also prefetch the next i915_gem_active to try and hide
447                  * the likely cache miss.
448                  */
449                 prefetchw(next);
450
451                 INIT_LIST_HEAD(&active->link);
452                 RCU_INIT_POINTER(active->request, NULL);
453
454                 active->retire(active, request);
455         }
456
457         i915_request_remove_from_client(request);
458
459         /* Retirement decays the ban score as it is a sign of ctx progress */
460         atomic_dec_if_positive(&request->gem_context->ban_score);
461         intel_context_unpin(request->hw_context);
462
463         __retire_engine_upto(request->engine, request);
464
465         unreserve_gt(request->i915);
466
467         i915_sched_node_fini(request->i915, &request->sched);
468         i915_request_put(request);
469 }
470
471 void i915_request_retire_upto(struct i915_request *rq)
472 {
473         struct intel_ring *ring = rq->ring;
474         struct i915_request *tmp;
475
476         GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
477                   rq->engine->name,
478                   rq->fence.context, rq->fence.seqno,
479                   rq->global_seqno,
480                   intel_engine_get_seqno(rq->engine));
481
482         lockdep_assert_held(&rq->i915->drm.struct_mutex);
483         GEM_BUG_ON(!i915_request_completed(rq));
484
485         if (list_empty(&rq->ring_link))
486                 return;
487
488         do {
489                 tmp = list_first_entry(&ring->request_list,
490                                        typeof(*tmp), ring_link);
491
492                 i915_request_retire(tmp);
493         } while (tmp != rq);
494 }
495
496 static u32 timeline_get_seqno(struct i915_timeline *tl)
497 {
498         return ++tl->seqno;
499 }
500
501 static void move_to_timeline(struct i915_request *request,
502                              struct i915_timeline *timeline)
503 {
504         GEM_BUG_ON(request->timeline == &request->engine->timeline);
505         lockdep_assert_held(&request->engine->timeline.lock);
506
507         spin_lock(&request->timeline->lock);
508         list_move_tail(&request->link, &timeline->requests);
509         spin_unlock(&request->timeline->lock);
510 }
511
512 void __i915_request_submit(struct i915_request *request)
513 {
514         struct intel_engine_cs *engine = request->engine;
515         u32 seqno;
516
517         GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
518                   engine->name,
519                   request->fence.context, request->fence.seqno,
520                   engine->timeline.seqno + 1,
521                   intel_engine_get_seqno(engine));
522
523         GEM_BUG_ON(!irqs_disabled());
524         lockdep_assert_held(&engine->timeline.lock);
525
526         GEM_BUG_ON(request->global_seqno);
527
528         seqno = timeline_get_seqno(&engine->timeline);
529         GEM_BUG_ON(!seqno);
530         GEM_BUG_ON(intel_engine_signaled(engine, seqno));
531
532         /* We may be recursing from the signal callback of another i915 fence */
533         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
534         request->global_seqno = seqno;
535         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
536                 intel_engine_enable_signaling(request, false);
537         spin_unlock(&request->lock);
538
539         engine->emit_breadcrumb(request,
540                                 request->ring->vaddr + request->postfix);
541
542         /* Transfer from per-context onto the global per-engine timeline */
543         move_to_timeline(request, &engine->timeline);
544
545         trace_i915_request_execute(request);
546
547         wake_up_all(&request->execute);
548 }
549
550 void i915_request_submit(struct i915_request *request)
551 {
552         struct intel_engine_cs *engine = request->engine;
553         unsigned long flags;
554
555         /* Will be called from irq-context when using foreign fences. */
556         spin_lock_irqsave(&engine->timeline.lock, flags);
557
558         __i915_request_submit(request);
559
560         spin_unlock_irqrestore(&engine->timeline.lock, flags);
561 }
562
563 void __i915_request_unsubmit(struct i915_request *request)
564 {
565         struct intel_engine_cs *engine = request->engine;
566
567         GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
568                   engine->name,
569                   request->fence.context, request->fence.seqno,
570                   request->global_seqno,
571                   intel_engine_get_seqno(engine));
572
573         GEM_BUG_ON(!irqs_disabled());
574         lockdep_assert_held(&engine->timeline.lock);
575
576         /*
577          * Only unwind in reverse order, required so that the per-context list
578          * is kept in seqno/ring order.
579          */
580         GEM_BUG_ON(!request->global_seqno);
581         GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
582         GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
583         engine->timeline.seqno--;
584
585         /* We may be recursing from the signal callback of another i915 fence */
586         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
587         request->global_seqno = 0;
588         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
589                 intel_engine_cancel_signaling(request);
590         spin_unlock(&request->lock);
591
592         /* Transfer back from the global per-engine timeline to per-context */
593         move_to_timeline(request, request->timeline);
594
595         /*
596          * We don't need to wake_up any waiters on request->execute, they
597          * will get woken by any other event or us re-adding this request
598          * to the engine timeline (__i915_request_submit()). The waiters
599          * should be quite adept at finding that the request now has a
600          * different global_seqno from the one they went to sleep on.
601          */
602 }
603
604 void i915_request_unsubmit(struct i915_request *request)
605 {
606         struct intel_engine_cs *engine = request->engine;
607         unsigned long flags;
608
609         /* Will be called from irq-context when using foreign fences. */
610         spin_lock_irqsave(&engine->timeline.lock, flags);
611
612         __i915_request_unsubmit(request);
613
614         spin_unlock_irqrestore(&engine->timeline.lock, flags);
615 }
616
617 static int __i915_sw_fence_call
618 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
619 {
620         struct i915_request *request =
621                 container_of(fence, typeof(*request), submit);
622
623         switch (state) {
624         case FENCE_COMPLETE:
625                 trace_i915_request_submit(request);
626                 /*
627                  * We need to serialize use of the submit_request() callback
628                  * with its hotplugging performed during an emergency
629                  * i915_gem_set_wedged().  We use the RCU mechanism to mark the
630                  * critical section in order to force i915_gem_set_wedged() to
631                  * wait until the submit_request() is completed before
632                  * proceeding.
633                  */
634                 rcu_read_lock();
635                 request->engine->submit_request(request);
636                 rcu_read_unlock();
637                 break;
638
639         case FENCE_FREE:
640                 i915_request_put(request);
641                 break;
642         }
643
644         return NOTIFY_DONE;
645 }
646
647 /**
648  * i915_request_alloc - allocate a request structure
649  *
650  * @engine: engine that we wish to issue the request on.
651  * @ctx: context that the request will be associated with.
652  *
653  * Returns a pointer to the allocated request if successful,
654  * or an error pointer if not.
655  */
656 struct i915_request *
657 i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
658 {
659         struct drm_i915_private *i915 = engine->i915;
660         struct i915_request *rq;
661         struct intel_context *ce;
662         int ret;
663
664         lockdep_assert_held(&i915->drm.struct_mutex);
665
666         /*
667          * Preempt contexts are reserved for exclusive use to inject a
668          * preemption context switch. They are never to be used for any trivial
669          * request!
670          */
671         GEM_BUG_ON(ctx == i915->preempt_context);
672
673         /*
674          * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
675          * EIO if the GPU is already wedged.
676          */
677         if (i915_terminally_wedged(&i915->gpu_error))
678                 return ERR_PTR(-EIO);
679
680         /*
681          * Pinning the contexts may generate requests in order to acquire
682          * GGTT space, so do this first before we reserve a seqno for
683          * ourselves.
684          */
685         ce = intel_context_pin(ctx, engine);
686         if (IS_ERR(ce))
687                 return ERR_CAST(ce);
688
689         ret = reserve_gt(i915);
690         if (ret)
691                 goto err_unpin;
692
693         ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
694         if (ret)
695                 goto err_unreserve;
696
697         /* Move our oldest request to the slab-cache (if not in use!) */
698         rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
699         if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
700             i915_request_completed(rq))
701                 i915_request_retire(rq);
702
703         /*
704          * Beware: Dragons be flying overhead.
705          *
706          * We use RCU to look up requests in flight. The lookups may
707          * race with the request being allocated from the slab freelist.
708          * That is the request we are writing to here, may be in the process
709          * of being read by __i915_gem_active_get_rcu(). As such,
710          * we have to be very careful when overwriting the contents. During
711          * the RCU lookup, we chase the request->engine pointer,
712          * read the request->global_seqno and increment the reference count.
713          *
714          * The reference count is incremented atomically. If it is zero,
715          * the lookup knows the request is unallocated and complete. Otherwise,
716          * it is either still in use, or has been reallocated and reset
717          * with dma_fence_init(). This increment is safe for release as we
718          * check that the request we have a reference to matches the active
719          * request.
720          *
721          * Before we increment the refcount, we chase the request->engine
722          * pointer. We must not call kmem_cache_zalloc() or else we set
723          * that pointer to NULL and cause a crash during the lookup. If
724          * we see the request is completed (based on the value of the
725          * old engine and seqno), the lookup is complete and reports NULL.
726          * If we decide the request is not completed (new engine or seqno),
727          * then we grab a reference and double check that it is still the
728          * active request - which it won't be - and then restart the lookup.
729          *
730          * Do not use kmem_cache_zalloc() here!
731          */
732         rq = kmem_cache_alloc(i915->requests,
733                               GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
734         if (unlikely(!rq)) {
735                 i915_retire_requests(i915);
736
737                 /* Ratelimit ourselves to prevent oom from malicious clients */
738                 rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
739                                          &i915->drm.struct_mutex);
740                 if (rq)
741                         cond_synchronize_rcu(rq->rcustate);
742
743                 /*
744                  * We've forced the client to stall and catch up with whatever
745                  * backlog there might have been. As we are assuming that we
746                  * caused the mempressure, now is an opportune time to
747                  * recover as much memory from the request pool as is possible.
748                  * Having already penalized the client to stall, we spend
749                  * a little extra time to re-optimise page allocation.
750                  */
751                 kmem_cache_shrink(i915->requests);
752                 rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
753
754                 rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
755                 if (!rq) {
756                         ret = -ENOMEM;
757                         goto err_unreserve;
758                 }
759         }
760
761         rq->rcustate = get_state_synchronize_rcu();
762
763         INIT_LIST_HEAD(&rq->active_list);
764         rq->i915 = i915;
765         rq->engine = engine;
766         rq->gem_context = ctx;
767         rq->hw_context = ce;
768         rq->ring = ce->ring;
769         rq->timeline = ce->ring->timeline;
770         GEM_BUG_ON(rq->timeline == &engine->timeline);
771
772         spin_lock_init(&rq->lock);
773         dma_fence_init(&rq->fence,
774                        &i915_fence_ops,
775                        &rq->lock,
776                        rq->timeline->fence_context,
777                        timeline_get_seqno(rq->timeline));
778
779         /* We bump the ref for the fence chain */
780         i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
781         init_waitqueue_head(&rq->execute);
782
783         i915_sched_node_init(&rq->sched);
784
785         /* No zalloc, must clear what we need by hand */
786         rq->global_seqno = 0;
787         rq->signaling.wait.seqno = 0;
788         rq->file_priv = NULL;
789         rq->batch = NULL;
790         rq->capture_list = NULL;
791         rq->waitboost = false;
792
793         /*
794          * Reserve space in the ring buffer for all the commands required to
795          * eventually emit this request. This is to guarantee that the
796          * i915_request_add() call can't fail. Note that the reserve may need
797          * to be redone if the request is not actually submitted straight
798          * away, e.g. because a GPU scheduler has deferred it.
799          */
800         rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
801         GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
802
803         /*
804          * Record the position of the start of the request so that
805          * should we detect the updated seqno part-way through the
806          * GPU processing the request, we never over-estimate the
807          * position of the head.
808          */
809         rq->head = rq->ring->emit;
810
811         /* Unconditionally invalidate GPU caches and TLBs. */
812         ret = engine->emit_flush(rq, EMIT_INVALIDATE);
813         if (ret)
814                 goto err_unwind;
815
816         ret = engine->request_alloc(rq);
817         if (ret)
818                 goto err_unwind;
819
820         /* Keep a second pin for the dual retirement along engine and ring */
821         __intel_context_pin(ce);
822
823         rq->infix = rq->ring->emit; /* end of header; start of user payload */
824
825         /* Check that we didn't interrupt ourselves with a new request */
826         GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
827         return rq;
828
829 err_unwind:
830         ce->ring->emit = rq->head;
831
832         /* Make sure we didn't add ourselves to external state before freeing */
833         GEM_BUG_ON(!list_empty(&rq->active_list));
834         GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
835         GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
836
837         kmem_cache_free(i915->requests, rq);
838 err_unreserve:
839         unreserve_gt(i915);
840 err_unpin:
841         intel_context_unpin(ce);
842         return ERR_PTR(ret);
843 }
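
/*
 * Illustrative lifecycle of a request built from the helpers in this file
 * (a sketch with error handling elided; the commands emitted and the await
 * calls depend entirely on the caller):
 *
 *	struct i915_request *rq;
 *	u32 *cs;
 *	int err;
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	cs = intel_ring_begin(rq, 4);
 *	... write up to 4 dwords, then intel_ring_advance(rq, cs) ...
 *	i915_request_add(rq);
 */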
844
845 static int
846 i915_request_await_request(struct i915_request *to, struct i915_request *from)
847 {
848         int ret;
849
850         GEM_BUG_ON(to == from);
851         GEM_BUG_ON(to->timeline == from->timeline);
852
853         if (i915_request_completed(from))
854                 return 0;
855
856         if (to->engine->schedule) {
857                 ret = i915_sched_node_add_dependency(to->i915,
858                                                      &to->sched,
859                                                      &from->sched);
860                 if (ret < 0)
861                         return ret;
862         }
863
864         if (to->engine == from->engine) {
865                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
866                                                        &from->submit,
867                                                        I915_FENCE_GFP);
868                 return ret < 0 ? ret : 0;
869         }
870
871         if (to->engine->semaphore.sync_to) {
872                 u32 seqno;
873
874                 GEM_BUG_ON(!from->engine->semaphore.signal);
875
876                 seqno = i915_request_global_seqno(from);
877                 if (!seqno)
878                         goto await_dma_fence;
879
880                 if (seqno <= to->timeline->global_sync[from->engine->id])
881                         return 0;
882
883                 trace_i915_gem_ring_sync_to(to, from);
884                 ret = to->engine->semaphore.sync_to(to, from);
885                 if (ret)
886                         return ret;
887
888                 to->timeline->global_sync[from->engine->id] = seqno;
889                 return 0;
890         }
891
892 await_dma_fence:
893         ret = i915_sw_fence_await_dma_fence(&to->submit,
894                                             &from->fence, 0,
895                                             I915_FENCE_GFP);
896         return ret < 0 ? ret : 0;
897 }
898
899 int
900 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
901 {
902         struct dma_fence **child = &fence;
903         unsigned int nchild = 1;
904         int ret;
905
906         /*
907          * Note that if the fence-array was created in signal-on-any mode,
908          * we should *not* decompose it into its individual fences. However,
909          * we don't currently store which mode the fence-array is operating
910          * in. Fortunately, the only user of signal-on-any is private to
911          * amdgpu and we should not see any incoming fence-array from
912          * sync-file being in signal-on-any mode.
913          */
914         if (dma_fence_is_array(fence)) {
915                 struct dma_fence_array *array = to_dma_fence_array(fence);
916
917                 child = array->fences;
918                 nchild = array->num_fences;
919                 GEM_BUG_ON(!nchild);
920         }
921
922         do {
923                 fence = *child++;
924                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
925                         continue;
926
927                 /*
928                  * Requests on the same timeline are explicitly ordered, along
929                  * with their dependencies, by i915_request_add() which ensures
930                  * that requests are submitted in-order through each ring.
931                  */
932                 if (fence->context == rq->fence.context)
933                         continue;
934
935                 /* Squash repeated waits to the same timelines */
936                 if (fence->context != rq->i915->mm.unordered_timeline &&
937                     i915_timeline_sync_is_later(rq->timeline, fence))
938                         continue;
939
940                 if (dma_fence_is_i915(fence))
941                         ret = i915_request_await_request(rq, to_request(fence));
942                 else
943                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
944                                                             I915_FENCE_TIMEOUT,
945                                                             I915_FENCE_GFP);
946                 if (ret < 0)
947                         return ret;
948
949                 /* Record the latest fence used against each timeline */
950                 if (fence->context != rq->i915->mm.unordered_timeline)
951                         i915_timeline_sync_set(rq->timeline, fence);
952         } while (--nchild);
953
954         return 0;
955 }
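
/*
 * A typical caller of i915_request_await_dma_fence() is an execbuf-style
 * submission path importing an explicit fence from userspace (sketch,
 * assuming a sync_file fd):
 *
 *	struct dma_fence *in_fence = sync_file_get_fence(fd);
 *
 *	if (in_fence) {
 *		err = i915_request_await_dma_fence(rq, in_fence);
 *		dma_fence_put(in_fence);
 *		if (err < 0)
 *			return err;
 *	}
 */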
956
957 /**
958  * i915_request_await_object - set this request to (async) wait upon a bo
959  * @to: request we are wishing to use
960  * @obj: object which may be in use on another ring.
961  * @write: whether the wait is on behalf of a writer
962  *
963  * This code is meant to abstract object synchronization with the GPU.
964  * Conceptually we serialise writes between engines inside the GPU.
965  * We only allow one engine to write into a buffer at any time, but
966  * multiple readers. To ensure each has a coherent view of memory, we must:
967  *
968  * - If there is an outstanding write request to the object, the new
969  *   request must wait for it to complete (either CPU or in hw, requests
970  *   on the same ring will be naturally ordered).
971  *
972  * - If we are a write request (pending_write_domain is set), the new
973  *   request must wait for outstanding read requests to complete.
974  *
975  * Returns 0 if successful, else propagates up the lower layer error.
976  */
977 int
978 i915_request_await_object(struct i915_request *to,
979                           struct drm_i915_gem_object *obj,
980                           bool write)
981 {
982         struct dma_fence *excl;
983         int ret = 0;
984
985         if (write) {
986                 struct dma_fence **shared;
987                 unsigned int count, i;
988
989                 ret = reservation_object_get_fences_rcu(obj->resv,
990                                                         &excl, &count, &shared);
991                 if (ret)
992                         return ret;
993
994                 for (i = 0; i < count; i++) {
995                         ret = i915_request_await_dma_fence(to, shared[i]);
996                         if (ret)
997                                 break;
998
999                         dma_fence_put(shared[i]);
1000                 }
1001
1002                 for (; i < count; i++)
1003                         dma_fence_put(shared[i]);
1004                 kfree(shared);
1005         } else {
1006                 excl = reservation_object_get_excl_rcu(obj->resv);
1007         }
1008
1009         if (excl) {
1010                 if (ret == 0)
1011                         ret = i915_request_await_dma_fence(to, excl);
1012
1013                 dma_fence_put(excl);
1014         }
1015
1016         return ret;
1017 }
1018
1019 void i915_request_skip(struct i915_request *rq, int error)
1020 {
1021         void *vaddr = rq->ring->vaddr;
1022         u32 head;
1023
1024         GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1025         dma_fence_set_error(&rq->fence, error);
1026
1027         /*
1028          * As this request likely depends on state from the lost
1029          * context, clear out all the user operations leaving the
1030          * breadcrumb at the end (so we get the fence notifications).
1031          */
1032         head = rq->infix;
1033         if (rq->postfix < head) {
1034                 memset(vaddr + head, 0, rq->ring->size - head);
1035                 head = 0;
1036         }
1037         memset(vaddr + head, 0, rq->postfix - head);
1038 }
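
/*
 * The ring is circular, hence the two memsets above. With hypothetical
 * offsets for a 16KiB ring, rq->infix == 0x3f00 and rq->postfix == 0x0040,
 * the payload wrapped: we first clear [0x3f00, 0x4000) and then
 * [0x0000, 0x0040), leaving only the breadcrumb emitted at rq->postfix.
 */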
1039
1040 /*
1041  * NB: This function is not allowed to fail. Doing so would mean that the
1042  * request is not being tracked for completion but the work itself is
1043  * going to happen on the hardware. This would be a Bad Thing(tm).
1044  */
1045 void i915_request_add(struct i915_request *request)
1046 {
1047         struct intel_engine_cs *engine = request->engine;
1048         struct i915_timeline *timeline = request->timeline;
1049         struct intel_ring *ring = request->ring;
1050         struct i915_request *prev;
1051         u32 *cs;
1052
1053         GEM_TRACE("%s fence %llx:%d\n",
1054                   engine->name, request->fence.context, request->fence.seqno);
1055
1056         lockdep_assert_held(&request->i915->drm.struct_mutex);
1057         trace_i915_request_add(request);
1058
1059         /*
1060          * Make sure that no request gazumped us - if it was allocated after
1061          * our i915_request_alloc() and called __i915_request_add() before
1062          * us, the timeline will hold its seqno which is later than ours.
1063          */
1064         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1065
1066         /*
1067          * To ensure that this call will not fail, space for its emissions
1068          * should already have been reserved in the ring buffer. Let the ring
1069          * know that it is time to use that space up.
1070          */
1071         request->reserved_space = 0;
1072         engine->emit_flush(request, EMIT_FLUSH);
1073
1074         /*
1075          * Record the position of the start of the breadcrumb so that
1076          * should we detect the updated seqno part-way through the
1077          * GPU processing the request, we never over-estimate the
1078          * position of the ring's HEAD.
1079          */
1080         cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
1081         GEM_BUG_ON(IS_ERR(cs));
1082         request->postfix = intel_ring_offset(request, cs);
1083
1084         /*
1085          * Seal the request and mark it as pending execution. Note that
1086          * we may inspect this state, without holding any locks, during
1087          * hangcheck. Hence we apply the barrier to ensure that we do not
1088          * see a more recent value in the hws than we are tracking.
1089          */
1090
1091         prev = i915_gem_active_raw(&timeline->last_request,
1092                                    &request->i915->drm.struct_mutex);
1093         if (prev && !i915_request_completed(prev)) {
1094                 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
1095                                              &request->submitq);
1096                 if (engine->schedule)
1097                         __i915_sched_node_add_dependency(&request->sched,
1098                                                          &prev->sched,
1099                                                          &request->dep,
1100                                                          0);
1101         }
1102
1103         spin_lock_irq(&timeline->lock);
1104         list_add_tail(&request->link, &timeline->requests);
1105         spin_unlock_irq(&timeline->lock);
1106
1107         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
1108         i915_gem_active_set(&timeline->last_request, request);
1109
1110         list_add_tail(&request->ring_link, &ring->request_list);
1111         if (list_is_first(&request->ring_link, &ring->request_list)) {
1112                 GEM_TRACE("marking %s as active\n", ring->timeline->name);
1113                 list_add(&ring->active_link, &request->i915->gt.active_rings);
1114         }
1115         request->emitted_jiffies = jiffies;
1116
1117         /*
1118          * Let the backend know a new request has arrived that may need
1119          * to adjust the existing execution schedule due to a high priority
1120          * request - i.e. we may want to preempt the current request in order
1121          * to run a high priority dependency chain *before* we can execute this
1122          * request.
1123          *
1124          * This is called before the request is ready to run so that we can
1125          * decide whether to preempt the entire chain so that it is ready to
1126          * run at the earliest possible convenience.
1127          */
1128         local_bh_disable();
1129         rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1130         if (engine->schedule)
1131                 engine->schedule(request, &request->gem_context->sched);
1132         rcu_read_unlock();
1133         i915_sw_fence_commit(&request->submit);
1134         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1135
1136         /*
1137          * In typical scenarios, we do not expect the previous request on
1138          * the timeline to be still tracked by timeline->last_request if it
1139          * has been completed. If the completed request is still here, that
1140          * implies that request retirement is a long way behind submission,
1141          * suggesting that we haven't been retiring frequently enough from
1142          * the combination of retire-before-alloc, waiters and the background
1143          * retirement worker. So if the last request on this timeline was
1144          * already completed, do a catch up pass, flushing the retirement queue
1145          * up to this client. Since we have now moved the heaviest operations
1146          * during retirement onto secondary workers, such as freeing objects
1147          * or contexts, retiring a bunch of requests is mostly list management
1148          * (and cache misses), and so we should not be overly penalizing this
1149  * client by performing excess work, though we may still be performing
1150          * work on behalf of others -- but instead we should benefit from
1151          * improved resource management. (Well, that's the theory at least.)
1152          */
1153         if (prev && i915_request_completed(prev))
1154                 i915_request_retire_upto(prev);
1155 }
1156
1157 static unsigned long local_clock_us(unsigned int *cpu)
1158 {
1159         unsigned long t;
1160
1161         /*
1162          * Cheaply and approximately convert from nanoseconds to microseconds.
1163          * The result and subsequent calculations are also defined in the same
1164          * approximate microseconds units. The principal source of timing
1165          * error here is from the simple truncation.
1166          *
1167  * Note that local_clock() is only defined wrt the current CPU;
1168          * the comparisons are no longer valid if we switch CPUs. Instead of
1169          * blocking preemption for the entire busywait, we can detect the CPU
1170          * switch and use that as indicator of system load and a reason to
1171          * stop busywaiting, see busywait_stop().
1172          */
1173         *cpu = get_cpu();
1174         t = local_clock() >> 10;
1175         put_cpu();
1176
1177         return t;
1178 }
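
/*
 * Note the >>10 divides by 1024 rather than 1000, so each "microsecond"
 * returned is really 1.024us, e.g. a 5us spin budget stretches to ~5.12us.
 * That 2.4% error is negligible against scheduler noise, which is why the
 * cheap shift is preferred over a division.
 */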
1179
1180 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1181 {
1182         unsigned int this_cpu;
1183
1184         if (time_after(local_clock_us(&this_cpu), timeout))
1185                 return true;
1186
1187         return this_cpu != cpu;
1188 }
1189
1190 static bool __i915_spin_request(const struct i915_request *rq,
1191                                 u32 seqno, int state, unsigned long timeout_us)
1192 {
1193         struct intel_engine_cs *engine = rq->engine;
1194         unsigned int irq, cpu;
1195
1196         GEM_BUG_ON(!seqno);
1197
1198         /*
1199          * Only wait for the request if we know it is likely to complete.
1200          *
1201          * We don't track the timestamps around requests, nor the average
1202          * request length, so we do not have a good indicator that this
1203          * request will complete within the timeout. What we do know is the
1204          * order in which requests are executed by the engine and so we can
1205          * tell if the request has started. If the request hasn't started yet,
1206          * it is a fair assumption that it will not complete within our
1207          * relatively short timeout.
1208          */
1209         if (!intel_engine_has_started(engine, seqno))
1210                 return false;
1211
1212         /*
1213          * When waiting for high frequency requests, e.g. during synchronous
1214          * rendering split between the CPU and GPU, the finite amount of time
1215          * required to set up the irq and wait upon it limits the response
1216          * rate. By busywaiting on the request completion for a short while we
1217  * can service the high frequency waits as quickly as possible. However,
1218          * if it is a slow request, we want to sleep as quickly as possible.
1219          * The tradeoff between waiting and sleeping is roughly the time it
1220          * takes to sleep on a request, on the order of a microsecond.
1221          */
1222
1223         irq = READ_ONCE(engine->breadcrumbs.irq_count);
1224         timeout_us += local_clock_us(&cpu);
1225         do {
1226                 if (intel_engine_has_completed(engine, seqno))
1227                         return seqno == i915_request_global_seqno(rq);
1228
1229                 /*
1230                  * Seqnos are meant to be ordered *before* the interrupt. If
1231                  * we see an interrupt without a corresponding seqno advance,
1232                  * assume we won't see one in the near future but require
1233                  * the engine->seqno_barrier() to fixup coherency.
1234                  */
1235                 if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
1236                         break;
1237
1238                 if (signal_pending_state(state, current))
1239                         break;
1240
1241                 if (busywait_stop(timeout_us, cpu))
1242                         break;
1243
1244                 cpu_relax();
1245         } while (!need_resched());
1246
1247         return false;
1248 }
1249
1250 static bool __i915_wait_request_check_and_reset(struct i915_request *request)
1251 {
1252         struct i915_gpu_error *error = &request->i915->gpu_error;
1253
1254         if (likely(!i915_reset_handoff(error)))
1255                 return false;
1256
1257         __set_current_state(TASK_RUNNING);
1258         i915_reset(request->i915, error->stalled_mask, error->reason);
1259         return true;
1260 }
1261
1262 /**
1263  * i915_request_wait - wait until execution of request has finished
1264  * @rq: the request to wait upon
1265  * @flags: how to wait
1266  * @timeout: how long to wait in jiffies
1267  *
1268  * i915_request_wait() waits for the request to be completed, for a
1269  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1270  * unbounded wait).
1271  *
1272  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1273  * in via the flags, and vice versa if the struct_mutex is not held, the caller
1274  * must not specify that the wait is locked.
1275  *
1276  * Returns the remaining time (in jiffies) if the request completed, which may
1277  * be zero, or -ETIME if the request is unfinished after the timeout expires.
1278  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1279  * pending before the request completes.
1280  */
1281 long i915_request_wait(struct i915_request *rq,
1282                        unsigned int flags,
1283                        long timeout)
1284 {
1285         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1286                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1287         wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
1288         DEFINE_WAIT_FUNC(reset, default_wake_function);
1289         DEFINE_WAIT_FUNC(exec, default_wake_function);
1290         struct intel_wait wait;
1291
1292         might_sleep();
1293 #if IS_ENABLED(CONFIG_LOCKDEP)
1294         GEM_BUG_ON(debug_locks &&
1295                    !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
1296                    !!(flags & I915_WAIT_LOCKED));
1297 #endif
1298         GEM_BUG_ON(timeout < 0);
1299
1300         if (i915_request_completed(rq))
1301                 return timeout;
1302
1303         if (!timeout)
1304                 return -ETIME;
1305
1306         trace_i915_request_wait_begin(rq, flags);
1307
1308         add_wait_queue(&rq->execute, &exec);
1309         if (flags & I915_WAIT_LOCKED)
1310                 add_wait_queue(errq, &reset);
1311
1312         intel_wait_init(&wait);
1313
1314 restart:
1315         do {
1316                 set_current_state(state);
1317                 if (intel_wait_update_request(&wait, rq))
1318                         break;
1319
1320                 if (flags & I915_WAIT_LOCKED &&
1321                     __i915_wait_request_check_and_reset(rq))
1322                         continue;
1323
1324                 if (signal_pending_state(state, current)) {
1325                         timeout = -ERESTARTSYS;
1326                         goto complete;
1327                 }
1328
1329                 if (!timeout) {
1330                         timeout = -ETIME;
1331                         goto complete;
1332                 }
1333
1334                 timeout = io_schedule_timeout(timeout);
1335         } while (1);
1336
1337         GEM_BUG_ON(!intel_wait_has_seqno(&wait));
1338         GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
1339
1340         /* Optimistic short spin before touching IRQs */
1341         if (__i915_spin_request(rq, wait.seqno, state, 5))
1342                 goto complete;
1343
1344         set_current_state(state);
1345         if (intel_engine_add_wait(rq->engine, &wait))
1346                 /*
1347                  * In order to check that we haven't missed the interrupt
1348                  * as we enabled it, we need to kick ourselves to do a
1349                  * coherent check on the seqno before we sleep.
1350                  */
1351                 goto wakeup;
1352
1353         if (flags & I915_WAIT_LOCKED)
1354                 __i915_wait_request_check_and_reset(rq);
1355
1356         for (;;) {
1357                 if (signal_pending_state(state, current)) {
1358                         timeout = -ERESTARTSYS;
1359                         break;
1360                 }
1361
1362                 if (!timeout) {
1363                         timeout = -ETIME;
1364                         break;
1365                 }
1366
1367                 timeout = io_schedule_timeout(timeout);
1368
1369                 if (intel_wait_complete(&wait) &&
1370                     intel_wait_check_request(&wait, rq))
1371                         break;
1372
1373                 set_current_state(state);
1374
1375 wakeup:
1376                 /*
1377                  * Carefully check if the request is complete, giving time
1378                  * for the seqno to be visible following the interrupt.
1379                  * We also have to check in case we are kicked by the GPU
1380                  * reset in order to drop the struct_mutex.
1381                  */
1382                 if (__i915_request_irq_complete(rq))
1383                         break;
1384
1385                 /*
1386                  * If the GPU is hung, and we hold the lock, reset the GPU
1387                  * and then check for completion. On a full reset, the engine's
1388                  * HW seqno will be advanced past us and we are complete.
1389                  * If we do a partial reset, we have to wait for the GPU to
1390                  * resume and update the breadcrumb.
1391                  *
1392                  * If we don't hold the mutex, we can just wait for the worker
1393                  * to come along and update the breadcrumb (either directly
1394                  * itself, or indirectly by recovering the GPU).
1395                  */
1396                 if (flags & I915_WAIT_LOCKED &&
1397                     __i915_wait_request_check_and_reset(rq))
1398                         continue;
1399
1400                 /* Only spin if we know the GPU is processing this request */
1401                 if (__i915_spin_request(rq, wait.seqno, state, 2))
1402                         break;
1403
1404                 if (!intel_wait_check_request(&wait, rq)) {
1405                         intel_engine_remove_wait(rq->engine, &wait);
1406                         goto restart;
1407                 }
1408         }
1409
1410         intel_engine_remove_wait(rq->engine, &wait);
1411 complete:
1412         __set_current_state(TASK_RUNNING);
1413         if (flags & I915_WAIT_LOCKED)
1414                 remove_wait_queue(errq, &reset);
1415         remove_wait_queue(&rq->execute, &exec);
1416         trace_i915_request_wait_end(rq);
1417
1418         return timeout;
1419 }
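
/*
 * Example wait (sketch): a caller holding struct_mutex must pass
 * I915_WAIT_LOCKED, and an unbounded interruptible wait looks like:
 *
 *	long timeout = i915_request_wait(rq,
 *					 I915_WAIT_INTERRUPTIBLE |
 *					 I915_WAIT_LOCKED,
 *					 MAX_SCHEDULE_TIMEOUT);
 *	if (timeout < 0)
 *		return timeout;
 */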
1420
1421 static void ring_retire_requests(struct intel_ring *ring)
1422 {
1423         struct i915_request *request, *next;
1424
1425         list_for_each_entry_safe(request, next,
1426                                  &ring->request_list, ring_link) {
1427                 if (!i915_request_completed(request))
1428                         break;
1429
1430                 i915_request_retire(request);
1431         }
1432 }
1433
1434 void i915_retire_requests(struct drm_i915_private *i915)
1435 {
1436         struct intel_ring *ring, *tmp;
1437
1438         lockdep_assert_held(&i915->drm.struct_mutex);
1439
1440         if (!i915->gt.active_requests)
1441                 return;
1442
1443         list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
1444                 ring_retire_requests(ring);
1445 }
1446
1447 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1448 #include "selftests/mock_request.c"
1449 #include "selftests/i915_request.c"
1450 #endif