1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/prefetch.h>
26 #include <linux/dma-fence-array.h>
27
28 #include "i915_drv.h"
29
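/* dma_fence interface glue: every request embeds a struct dma_fence and the
 * callbacks below map the generic fence operations (completion test,
 * signaling, waiting) onto the underlying i915 request state. They are
 * exported through i915_fence_ops further down.
 */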
30 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
31 {
32         return "i915";
33 }
34
35 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
36 {
37         return to_request(fence)->timeline->common->name;
38 }
39
40 static bool i915_fence_signaled(struct dma_fence *fence)
41 {
42         return i915_gem_request_completed(to_request(fence));
43 }
44
45 static bool i915_fence_enable_signaling(struct dma_fence *fence)
46 {
47         if (i915_fence_signaled(fence))
48                 return false;
49
50         intel_engine_enable_signaling(to_request(fence));
51         return true;
52 }
53
54 static signed long i915_fence_wait(struct dma_fence *fence,
55                                    bool interruptible,
56                                    signed long timeout)
57 {
58         return i915_wait_request(to_request(fence), interruptible, timeout);
59 }
60
61 static void i915_fence_release(struct dma_fence *fence)
62 {
63         struct drm_i915_gem_request *req = to_request(fence);
64
65         /* The request is put onto an RCU freelist (i.e. the address
66          * is immediately reused), mark the fences as being freed now.
67          * Otherwise the debugobjects for the fences are only marked as
68          * freed when the slab cache itself is freed, and so we would get
69          * caught trying to reuse dead objects.
70          */
71         i915_sw_fence_fini(&req->submit);
72         i915_sw_fence_fini(&req->execute);
73
74         kmem_cache_free(req->i915->requests, req);
75 }
76
77 const struct dma_fence_ops i915_fence_ops = {
78         .get_driver_name = i915_fence_get_driver_name,
79         .get_timeline_name = i915_fence_get_timeline_name,
80         .enable_signaling = i915_fence_enable_signaling,
81         .signaled = i915_fence_signaled,
82         .wait = i915_fence_wait,
83         .release = i915_fence_release,
84 };
85
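/* Track the request on the list of its originating client (drm_file), so
 * that per-client bookkeeping (e.g. request throttling) can walk the file's
 * outstanding work. The request is unlinked again when it is retired.
 */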
86 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
87                                    struct drm_file *file)
88 {
89         struct drm_i915_private *dev_private;
90         struct drm_i915_file_private *file_priv;
91
92         WARN_ON(!req || !file || req->file_priv);
93
94         if (!req || !file)
95                 return -EINVAL;
96
97         if (req->file_priv)
98                 return -EINVAL;
99
100         dev_private = req->i915;
101         file_priv = file->driver_priv;
102
103         spin_lock(&file_priv->mm.lock);
104         req->file_priv = file_priv;
105         list_add_tail(&req->client_list, &file_priv->mm.request_list);
106         spin_unlock(&file_priv->mm.lock);
107
108         return 0;
109 }
110
111 static inline void
112 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
113 {
114         struct drm_i915_file_private *file_priv = request->file_priv;
115
116         if (!file_priv)
117                 return;
118
119         spin_lock(&file_priv->mm.lock);
120         list_del(&request->client_list);
121         request->file_priv = NULL;
122         spin_unlock(&file_priv->mm.lock);
123 }
124
125 static struct i915_dependency *
126 i915_dependency_alloc(struct drm_i915_private *i915)
127 {
128         return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
129 }
130
131 static void
132 i915_dependency_free(struct drm_i915_private *i915,
133                      struct i915_dependency *dep)
134 {
135         kmem_cache_free(i915->dependencies, dep);
136 }
137
138 static void
139 __i915_priotree_add_dependency(struct i915_priotree *pt,
140                                struct i915_priotree *signal,
141                                struct i915_dependency *dep,
142                                unsigned long flags)
143 {
144         INIT_LIST_HEAD(&dep->dfs_link);
145         list_add(&dep->wait_link, &signal->waiters_list);
146         list_add(&dep->signal_link, &pt->signalers_list);
147         dep->signaler = signal;
148         dep->flags = flags;
149 }
150
151 static int
152 i915_priotree_add_dependency(struct drm_i915_private *i915,
153                              struct i915_priotree *pt,
154                              struct i915_priotree *signal)
155 {
156         struct i915_dependency *dep;
157
158         dep = i915_dependency_alloc(i915);
159         if (!dep)
160                 return -ENOMEM;
161
162         __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
163         return 0;
164 }
165
166 static void
167 i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
168 {
169         struct i915_dependency *dep, *next;
170
171         GEM_BUG_ON(!RB_EMPTY_NODE(&pt->node));
172
173         /* Everyone we depended upon (the fences we wait on to be signaled)
174          * should retire before us and remove themselves from our list.
175          * However, retirement is run independently on each timeline and
176          * so we may be called out-of-order.
177          */
178         list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
179                 list_del(&dep->wait_link);
180                 if (dep->flags & I915_DEPENDENCY_ALLOC)
181                         i915_dependency_free(i915, dep);
182         }
183
184         /* Remove ourselves from everyone who depends upon us */
185         list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
186                 list_del(&dep->signal_link);
187                 if (dep->flags & I915_DEPENDENCY_ALLOC)
188                         i915_dependency_free(i915, dep);
189         }
190 }
191
192 static void
193 i915_priotree_init(struct i915_priotree *pt)
194 {
195         INIT_LIST_HEAD(&pt->signalers_list);
196         INIT_LIST_HEAD(&pt->waiters_list);
197         RB_CLEAR_NODE(&pt->node);
198         pt->priority = INT_MIN;
199 }
200
201 void i915_gem_retire_noop(struct i915_gem_active *active,
202                           struct drm_i915_gem_request *request)
203 {
204         /* Space left intentionally blank */
205 }
206
207 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
208 {
209         struct intel_engine_cs *engine = request->engine;
210         struct i915_gem_active *active, *next;
211
212         lockdep_assert_held(&request->i915->drm.struct_mutex);
213         GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
214         GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
215         GEM_BUG_ON(!i915_gem_request_completed(request));
216         GEM_BUG_ON(!request->i915->gt.active_requests);
217
218         trace_i915_gem_request_retire(request);
219
220         spin_lock_irq(&engine->timeline->lock);
221         list_del_init(&request->link);
222         spin_unlock_irq(&engine->timeline->lock);
223
224         /* We know the GPU must have read the request to have
225          * sent us the seqno + interrupt, so use the position
226          * of the tail of the request to update the last known position
227          * of the GPU head.
228          *
229          * Note this requires that we are always called in request
230          * completion order.
231          */
232         list_del(&request->ring_link);
233         request->ring->last_retired_head = request->postfix;
234         if (!--request->i915->gt.active_requests) {
235                 GEM_BUG_ON(!request->i915->gt.awake);
236                 mod_delayed_work(request->i915->wq,
237                                  &request->i915->gt.idle_work,
238                                  msecs_to_jiffies(100));
239         }
240
241         /* Walk through the active list, calling retire on each. This allows
242          * objects to track their GPU activity and mark themselves as idle
243          * when their *last* active request is completed (updating state
244          * tracking lists for eviction, active references for GEM, etc).
245          *
246          * As the ->retire() may free the node, we decouple it first and
247          * pass along the auxiliary information (to avoid dereferencing
248          * the node after the callback).
249          */
250         list_for_each_entry_safe(active, next, &request->active_list, link) {
251                 /* In microbenchmarks or focusing upon time inside the kernel,
252                  * we may spend an inordinate amount of time simply handling
253                  * the retirement of requests and processing their callbacks.
254                  * Of which, this loop itself is particularly hot due to the
255                  * cache misses when jumping around the list of i915_gem_active.
256                  * So we try to keep this loop as streamlined as possible and
257                  * also prefetch the next i915_gem_active to try and hide
258                  * the likely cache miss.
259                  */
260                 prefetchw(next);
261
262                 INIT_LIST_HEAD(&active->link);
263                 RCU_INIT_POINTER(active->request, NULL);
264
265                 active->retire(active, request);
266         }
267
268         i915_gem_request_remove_from_client(request);
269
270         /* Retirement decays the ban score as it is a sign of ctx progress */
271         if (request->ctx->ban_score > 0)
272                 request->ctx->ban_score--;
273
274         /* The backing object for the context is done after switching to the
275          * *next* context. Therefore we cannot retire the previous context until
276          * the next context has already started running. However, since we
277          * cannot take the required locks at i915_gem_request_submit() we
278          * defer the unpinning of the active context to now, retirement of
279          * the subsequent request.
280          */
281         if (engine->last_retired_context)
282                 engine->context_unpin(engine, engine->last_retired_context);
283         engine->last_retired_context = request->ctx;
284
285         dma_fence_signal(&request->fence);
286
287         i915_priotree_fini(request->i915, &request->priotree);
288         i915_gem_request_put(request);
289 }
290
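/* Retire all requests on the engine timeline up to and including @req.
 * The caller must hold struct_mutex and @req must already have completed.
 */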
291 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
292 {
293         struct intel_engine_cs *engine = req->engine;
294         struct drm_i915_gem_request *tmp;
295
296         lockdep_assert_held(&req->i915->drm.struct_mutex);
297         GEM_BUG_ON(!i915_gem_request_completed(req));
298
299         if (list_empty(&req->link))
300                 return;
301
302         do {
303                 tmp = list_first_entry(&engine->timeline->requests,
304                                        typeof(*tmp), link);
305
306                 i915_gem_request_retire(tmp);
307         } while (tmp != req);
308 }
309
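/* Check for a wedged GPU or a pending reset before building a new request:
 * returns -EIO if terminally wedged (or if the caller cannot cope with
 * -EAGAIN), -EAGAIN while a reset is in progress, and 0 otherwise.
 */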
310 static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
311 {
312         struct i915_gpu_error *error = &dev_priv->gpu_error;
313
314         if (i915_terminally_wedged(error))
315                 return -EIO;
316
317         if (i915_reset_in_progress(error)) {
318                 /* Non-interruptible callers can't handle -EAGAIN, hence return
319                  * -EIO unconditionally for these.
320                  */
321                 if (!dev_priv->mm.interruptible)
322                         return -EIO;
323
324                 return -EAGAIN;
325         }
326
327         return 0;
328 }
329
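/* Reset the global seqno to @seqno. This requires idling the GPU and
 * retiring all requests so that nothing still refers to the old
 * breadcrumbs, after which the per-engine HWS values are rewritten and the
 * inter-engine sync state of every timeline is cleared.
 */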
330 static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
331 {
332         struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
333         struct intel_engine_cs *engine;
334         enum intel_engine_id id;
335         int ret;
336
337         /* Carefully retire all requests without writing to the rings */
338         ret = i915_gem_wait_for_idle(i915,
339                                      I915_WAIT_INTERRUPTIBLE |
340                                      I915_WAIT_LOCKED);
341         if (ret)
342                 return ret;
343
344         i915_gem_retire_requests(i915);
345         GEM_BUG_ON(i915->gt.active_requests > 1);
346
347         /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
348         if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
349                 while (intel_breadcrumbs_busy(i915))
350                         cond_resched(); /* spin until threads are complete */
351         }
352         atomic_set(&timeline->seqno, seqno);
353
354         /* Finally reset hw state */
355         for_each_engine(engine, i915, id)
356                 intel_engine_init_global_seqno(engine, seqno);
357
358         list_for_each_entry(timeline, &i915->gt.timelines, link) {
359                 for_each_engine(engine, i915, id) {
360                         struct intel_timeline *tl = &timeline->engine[id];
361
362                         memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
363                 }
364         }
365
366         return 0;
367 }
368
369 int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
370 {
371         struct drm_i915_private *dev_priv = to_i915(dev);
372
373         lockdep_assert_held(&dev_priv->drm.struct_mutex);
374
375         if (seqno == 0)
376                 return -EINVAL;
377
378         /* The HWS page value needs to be one less than the seqno we will
379          * inject into the ring, so the next request does not appear complete.
380          */
381         return i915_gem_init_global_seqno(dev_priv, seqno - 1);
382 }
383
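/* Account for a new request against the global seqno space. If adding one
 * more request would wrap the 32-bit seqno, idle the GPU and restart the
 * count from zero before proceeding.
 */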
384 static int reserve_global_seqno(struct drm_i915_private *i915)
385 {
386         u32 active_requests = ++i915->gt.active_requests;
387         u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
388         int ret;
389
390         /* Reservation is fine until we need to wrap around */
391         if (likely(seqno + active_requests > seqno))
392                 return 0;
393
394         ret = i915_gem_init_global_seqno(i915, 0);
395         if (ret) {
396                 i915->gt.active_requests--;
397                 return ret;
398         }
399
400         return 0;
401 }
402
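/* Two flavours of seqno allocation: __timeline_get_seqno() assumes the
 * timeline is already serialised (e.g. under struct_mutex), whereas
 * timeline_get_seqno() uses an atomic increment so that it is safe from the
 * submission path under the engine timeline spinlock.
 */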
403 static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
404 {
405         /* seqno only incremented under a mutex */
406         return ++tl->seqno.counter;
407 }
408
409 static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
410 {
411         return atomic_inc_return(&tl->seqno);
412 }
413
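/* Move the request from its per-context timeline onto the engine's global
 * timeline and assign its global seqno. The engine timeline lock must be
 * held by the caller; see i915_gem_request_submit() for the irq-safe
 * wrapper.
 */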
414 void __i915_gem_request_submit(struct drm_i915_gem_request *request)
415 {
416         struct intel_engine_cs *engine = request->engine;
417         struct intel_timeline *timeline;
418         u32 seqno;
419
420         /* Transfer from per-context onto the global per-engine timeline */
421         timeline = engine->timeline;
422         GEM_BUG_ON(timeline == request->timeline);
423         assert_spin_locked(&timeline->lock);
424
425         seqno = timeline_get_seqno(timeline->common);
426         GEM_BUG_ON(!seqno);
427         GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
428
429         GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
430         request->previous_seqno = timeline->last_submitted_seqno;
431         timeline->last_submitted_seqno = seqno;
432
433         /* We may be recursing from the signal callback of another i915 fence */
434         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
435         request->global_seqno = seqno;
436         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
437                 intel_engine_enable_signaling(request);
438         spin_unlock(&request->lock);
439
440         GEM_BUG_ON(!request->global_seqno);
441         engine->emit_breadcrumb(request,
442                                 request->ring->vaddr + request->postfix);
443
444         spin_lock(&request->timeline->lock);
445         list_move_tail(&request->link, &timeline->requests);
446         spin_unlock(&request->timeline->lock);
447
448         i915_sw_fence_commit(&request->execute);
449 }
450
451 void i915_gem_request_submit(struct drm_i915_gem_request *request)
452 {
453         struct intel_engine_cs *engine = request->engine;
454         unsigned long flags;
455
456         /* Will be called from irq-context when using foreign fences. */
457         spin_lock_irqsave(&engine->timeline->lock, flags);
458
459         __i915_gem_request_submit(request);
460
461         spin_unlock_irqrestore(&engine->timeline->lock, flags);
462 }
463
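/* i915_sw_fence notify callbacks for the two stages of a request's life:
 * completing the submit fence hands the request to the backend for
 * execution, while the execute fence merely marks that submission to
 * hardware has happened. Both drop their request reference when freed.
 */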
464 static int __i915_sw_fence_call
465 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
466 {
467         struct drm_i915_gem_request *request =
468                 container_of(fence, typeof(*request), submit);
469
470         switch (state) {
471         case FENCE_COMPLETE:
472                 request->engine->submit_request(request);
473                 break;
474
475         case FENCE_FREE:
476                 i915_gem_request_put(request);
477                 break;
478         }
479
480         return NOTIFY_DONE;
481 }
482
483 static int __i915_sw_fence_call
484 execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
485 {
486         struct drm_i915_gem_request *request =
487                 container_of(fence, typeof(*request), execute);
488
489         switch (state) {
490         case FENCE_COMPLETE:
491                 break;
492
493         case FENCE_FREE:
494                 i915_gem_request_put(request);
495                 break;
496         }
497
498         return NOTIFY_DONE;
499 }
500
501 /**
502  * i915_gem_request_alloc - allocate a request structure
503  *
504  * @engine: engine that we wish to issue the request on.
505  * @ctx: context that the request will be associated with. Must not be
506  *       NULL; requests that are not issued on behalf of a specific user
507  *       context (e.g. internal kernel requests) are expected to use the
508  *       dedicated kernel context instead.
509  *
510  * Returns a pointer to the allocated request if successful,
511  * or an error pointer (ERR_PTR) if not.
512  */
513 struct drm_i915_gem_request *
514 i915_gem_request_alloc(struct intel_engine_cs *engine,
515                        struct i915_gem_context *ctx)
516 {
517         struct drm_i915_private *dev_priv = engine->i915;
518         struct drm_i915_gem_request *req;
519         int ret;
520
521         lockdep_assert_held(&dev_priv->drm.struct_mutex);
522
523         /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
524          * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
525          * and restart.
526          */
527         ret = i915_gem_check_wedge(dev_priv);
528         if (ret)
529                 return ERR_PTR(ret);
530
531         /* Pinning the contexts may generate requests in order to acquire
532          * GGTT space, so do this first before we reserve a seqno for
533          * ourselves.
534          */
535         ret = engine->context_pin(engine, ctx);
536         if (ret)
537                 return ERR_PTR(ret);
538
539         ret = reserve_global_seqno(dev_priv);
540         if (ret)
541                 goto err_unpin;
542
543         /* Move the oldest request to the slab-cache (if not in use!) */
544         req = list_first_entry_or_null(&engine->timeline->requests,
545                                        typeof(*req), link);
546         if (req && __i915_gem_request_completed(req))
547                 i915_gem_request_retire(req);
548
549         /* Beware: Dragons be flying overhead.
550          *
551          * We use RCU to look up requests in flight. The lookups may
552          * race with the request being allocated from the slab freelist.
553          * That is, the request we are writing to here may be in the process
554          * of being read by __i915_gem_active_get_rcu(). As such,
555          * we have to be very careful when overwriting the contents. During
556          * the RCU lookup, we chase the request->engine pointer,
557          * read the request->global_seqno and increment the reference count.
558          *
559          * The reference count is incremented atomically. If it is zero,
560          * the lookup knows the request is unallocated and complete. Otherwise,
561          * it is either still in use, or has been reallocated and reset
562          * with dma_fence_init(). This increment is safe for release as we
563          * check that the request we have a reference to matches the active
564          * request.
565          *
566          * Before we increment the refcount, we chase the request->engine
567          * pointer. We must not call kmem_cache_zalloc() or else we set
568          * that pointer to NULL and cause a crash during the lookup. If
569          * we see the request is completed (based on the value of the
570          * old engine and seqno), the lookup is complete and reports NULL.
571          * If we decide the request is not completed (new engine or seqno),
572          * then we grab a reference and double check that it is still the
573          * active request - if it is not, we restart the lookup.
574          *
575          * Do not use kmem_cache_zalloc() here!
576          */
577         req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
578         if (!req) {
579                 ret = -ENOMEM;
580                 goto err_unreserve;
581         }
582
583         req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
584         GEM_BUG_ON(req->timeline == engine->timeline);
585
586         spin_lock_init(&req->lock);
587         dma_fence_init(&req->fence,
588                        &i915_fence_ops,
589                        &req->lock,
590                        req->timeline->fence_context,
591                        __timeline_get_seqno(req->timeline->common));
592
593         /* We bump the ref for the fence chain */
594         i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
595         i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
596
597         /* Ensure that the execute fence completes after the submit fence -
598          * as we complete the execute fence from within the submit fence
599          * callback, its completion would otherwise be visible first.
600          */
601         i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
602
603         i915_priotree_init(&req->priotree);
604
605         INIT_LIST_HEAD(&req->active_list);
606         req->i915 = dev_priv;
607         req->engine = engine;
608         req->ctx = ctx;
609
610         /* No zalloc, must clear what we need by hand */
611         req->global_seqno = 0;
612         req->file_priv = NULL;
613         req->batch = NULL;
614
615         /*
616          * Reserve space in the ring buffer for all the commands required to
617          * eventually emit this request. This is to guarantee that the
618          * i915_add_request() call can't fail. Note that the reserve may need
619          * to be redone if the request is not actually submitted straight
620          * away, e.g. because a GPU scheduler has deferred it.
621          */
622         req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
623         GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
624
625         ret = engine->request_alloc(req);
626         if (ret)
627                 goto err_ctx;
628
629         /* Record the position of the start of the request so that
630          * should we detect the updated seqno part-way through the
631          * GPU processing the request, we never over-estimate the
632          * position of the head.
633          */
634         req->head = req->ring->tail;
635
636         return req;
637
638 err_ctx:
639         /* Make sure we didn't add ourselves to external state before freeing */
640         GEM_BUG_ON(!list_empty(&req->active_list));
641         GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
642         GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
643
644         kmem_cache_free(dev_priv->requests, req);
645 err_unreserve:
646         dev_priv->gt.active_requests--;
647 err_unpin:
648         engine->context_unpin(engine, ctx);
649         return ERR_PTR(ret);
650 }
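/* A simplified sketch of how a caller (e.g. execbuffer) typically strings
 * these entry points together; the real callers live elsewhere (such as
 * i915_gem_execbuffer.c) and differ in detail:
 *
 *	req = i915_gem_request_alloc(engine, ctx);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	err = i915_gem_request_await_object(req, obj, write);
 *	if (err == 0)
 *		... emit commands into req->ring ...
 *	__i915_add_request(req, true);
 */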
651
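/* Order request @to after request @from: record the scheduler dependency,
 * then pick the cheapest wait available - nothing for the same timeline, a
 * submit-fence coupling for the same engine, and otherwise either a
 * dma-fence wait or a hardware semaphore, caching the result in sync_seqno
 * so the same wait is not emitted twice.
 */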
652 static int
653 i915_gem_request_await_request(struct drm_i915_gem_request *to,
654                                struct drm_i915_gem_request *from)
655 {
656         int ret;
657
658         GEM_BUG_ON(to == from);
659
660         if (to->engine->schedule) {
661                 ret = i915_priotree_add_dependency(to->i915,
662                                                    &to->priotree,
663                                                    &from->priotree);
664                 if (ret < 0)
665                         return ret;
666         }
667
668         if (to->timeline == from->timeline)
669                 return 0;
670
671         if (to->engine == from->engine) {
672                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
673                                                        &from->submit,
674                                                        GFP_KERNEL);
675                 return ret < 0 ? ret : 0;
676         }
677
678         if (!from->global_seqno) {
679                 ret = i915_sw_fence_await_dma_fence(&to->submit,
680                                                     &from->fence, 0,
681                                                     GFP_KERNEL);
682                 return ret < 0 ? ret : 0;
683         }
684
685         if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
686                 return 0;
687
688         trace_i915_gem_ring_sync_to(to, from);
689         if (!i915.semaphores) {
690                 if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
691                         ret = i915_sw_fence_await_dma_fence(&to->submit,
692                                                             &from->fence, 0,
693                                                             GFP_KERNEL);
694                         if (ret < 0)
695                                 return ret;
696                 }
697         } else {
698                 ret = to->engine->semaphore.sync_to(to, from);
699                 if (ret)
700                         return ret;
701         }
702
703         to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
704         return 0;
705 }
706
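/* As i915_gem_request_await_request(), but for an arbitrary dma-fence:
 * native i915 fences are handled directly, fence-arrays are decomposed into
 * their children, and foreign fences are waited upon via the submit fence.
 */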
707 int
708 i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
709                                  struct dma_fence *fence)
710 {
711         struct dma_fence_array *array;
712         int ret;
713         int i;
714
715         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
716                 return 0;
717
718         if (dma_fence_is_i915(fence))
719                 return i915_gem_request_await_request(req, to_request(fence));
720
721         if (!dma_fence_is_array(fence)) {
722                 ret = i915_sw_fence_await_dma_fence(&req->submit,
723                                                     fence, I915_FENCE_TIMEOUT,
724                                                     GFP_KERNEL);
725                 return ret < 0 ? ret : 0;
726         }
727
728         /* Note that if the fence-array was created in signal-on-any mode,
729          * we should *not* decompose it into its individual fences. However,
730          * we don't currently store which mode the fence-array is operating
731          * in. Fortunately, the only user of signal-on-any is private to
732          * amdgpu and we should not see any incoming fence-array from
733          * sync-file being in signal-on-any mode.
734          */
735
736         array = to_dma_fence_array(fence);
737         for (i = 0; i < array->num_fences; i++) {
738                 struct dma_fence *child = array->fences[i];
739
740                 if (dma_fence_is_i915(child))
741                         ret = i915_gem_request_await_request(req,
742                                                              to_request(child));
743                 else
744                         ret = i915_sw_fence_await_dma_fence(&req->submit,
745                                                             child, I915_FENCE_TIMEOUT,
746                                                             GFP_KERNEL);
747                 if (ret < 0)
748                         return ret;
749         }
750
751         return 0;
752 }
753
754 /**
755  * i915_gem_request_await_object - set this request to (async) wait upon a bo
756  *
757  * @to: request we are wishing to use
758  * @obj: object which may be in use on another ring.
759  *
760  * This code is meant to abstract object synchronization with the GPU.
761  * Conceptually we serialise writes between engines inside the GPU.
762  * We only allow one engine to write into a buffer at any time, but
763  * multiple readers. To ensure each has a coherent view of memory, we must:
764  *
765  * - If there is an outstanding write request to the object, the new
766  *   request must wait for it to complete (either CPU or in hw, requests
767  *   on the same ring will be naturally ordered).
768  *
769  * - If we are a write request (pending_write_domain is set), the new
770  *   request must wait for outstanding read requests to complete.
771  *
772  * Returns 0 if successful, else propagates the error from the lower layer.
773  */
774 int
775 i915_gem_request_await_object(struct drm_i915_gem_request *to,
776                               struct drm_i915_gem_object *obj,
777                               bool write)
778 {
779         struct dma_fence *excl;
780         int ret = 0;
781
782         if (write) {
783                 struct dma_fence **shared;
784                 unsigned int count, i;
785
786                 ret = reservation_object_get_fences_rcu(obj->resv,
787                                                         &excl, &count, &shared);
788                 if (ret)
789                         return ret;
790
791                 for (i = 0; i < count; i++) {
792                         ret = i915_gem_request_await_dma_fence(to, shared[i]);
793                         if (ret)
794                                 break;
795
796                         dma_fence_put(shared[i]);
797                 }
798
799                 for (; i < count; i++)
800                         dma_fence_put(shared[i]);
801                 kfree(shared);
802         } else {
803                 excl = reservation_object_get_excl_rcu(obj->resv);
804         }
805
806         if (excl) {
807                 if (ret == 0)
808                         ret = i915_gem_request_await_dma_fence(to, excl);
809
810                 dma_fence_put(excl);
811         }
812
813         return ret;
814 }
815
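/* First activity after being idle: take a runtime-pm wakeref (noresume, as
 * the device is expected to be awake already while we build requests),
 * re-enable GT power management and kick off the periodic retire worker.
 */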
816 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
817 {
818         struct drm_i915_private *dev_priv = engine->i915;
819
820         if (dev_priv->gt.awake)
821                 return;
822
823         GEM_BUG_ON(!dev_priv->gt.active_requests);
824
825         intel_runtime_pm_get_noresume(dev_priv);
826         dev_priv->gt.awake = true;
827
828         intel_enable_gt_powersave(dev_priv);
829         i915_update_gfx_val(dev_priv);
830         if (INTEL_GEN(dev_priv) >= 6)
831                 gen6_rps_busy(dev_priv);
832
833         queue_delayed_work(dev_priv->wq,
834                            &dev_priv->gt.retire_work,
835                            round_jiffies_up_relative(HZ));
836 }
837
838 /*
839  * NB: This function is not allowed to fail. Doing so would mean the
840  * request is not being tracked for completion but the work itself is
841  * going to happen on the hardware. This would be a Bad Thing(tm).
842  */
843 void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
844 {
845         struct intel_engine_cs *engine = request->engine;
846         struct intel_ring *ring = request->ring;
847         struct intel_timeline *timeline = request->timeline;
848         struct drm_i915_gem_request *prev;
849         int err;
850
851         lockdep_assert_held(&request->i915->drm.struct_mutex);
852         trace_i915_gem_request_add(request);
853
854         /*
855          * To ensure that this call will not fail, space for its emissions
856          * should already have been reserved in the ring buffer. Let the ring
857          * know that it is time to use that space up.
858          */
859         request->reserved_space = 0;
860
861         /*
862          * Emit any outstanding flushes - execbuf can fail to emit the flush
863          * after having emitted the batchbuffer command. Hence we need to fix
864          * things up similar to emitting the lazy request. The difference here
865          * is that the flush _must_ happen before the next request, no matter
866          * what.
867          */
868         if (flush_caches) {
869                 err = engine->emit_flush(request, EMIT_FLUSH);
870
871                 /* Not allowed to fail! */
872                 WARN(err, "engine->emit_flush() failed: %d!\n", err);
873         }
874
875         /* Record the position of the start of the breadcrumb so that
876          * should we detect the updated seqno part-way through the
877          * GPU processing the request, we never over-estimate the
878          * position of the ring's HEAD.
879          */
880         err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
881         GEM_BUG_ON(err);
882         request->postfix = ring->tail;
883         ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
884
885         /* Seal the request and mark it as pending execution. Note that
886          * we may inspect this state, without holding any locks, during
887          * hangcheck. Hence we apply the barrier to ensure that we do not
888          * see a more recent value in the hws than we are tracking.
889          */
890
891         prev = i915_gem_active_raw(&timeline->last_request,
892                                    &request->i915->drm.struct_mutex);
893         if (prev) {
894                 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
895                                              &request->submitq);
896                 if (engine->schedule)
897                         __i915_priotree_add_dependency(&request->priotree,
898                                                        &prev->priotree,
899                                                        &request->dep,
900                                                        0);
901         }
902
903         spin_lock_irq(&timeline->lock);
904         list_add_tail(&request->link, &timeline->requests);
905         spin_unlock_irq(&timeline->lock);
906
907         GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
908                                      request->fence.seqno));
909
910         timeline->last_submitted_seqno = request->fence.seqno;
911         i915_gem_active_set(&timeline->last_request, request);
912
913         list_add_tail(&request->ring_link, &ring->request_list);
914         request->emitted_jiffies = jiffies;
915
916         i915_gem_mark_busy(engine);
917
918         /* Let the backend know a new request has arrived that may need
919          * to adjust the existing execution schedule due to a high priority
920          * request - i.e. we may want to preempt the current request in order
921          * to run a high priority dependency chain *before* we can execute this
922          * request.
923          *
924          * This is called before the request is ready to run so that we can
925          * decide whether to preempt the entire chain so that it is ready to
926          * run at the earliest possible convenience.
927          */
928         if (engine->schedule)
929                 engine->schedule(request, request->ctx->priority);
930
931         local_bh_disable();
932         i915_sw_fence_commit(&request->submit);
933         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
934 }
935
936 static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
937 {
938         unsigned long flags;
939
940         spin_lock_irqsave(&q->lock, flags);
941         if (list_empty(&wait->task_list))
942                 __add_wait_queue(q, wait);
943         spin_unlock_irqrestore(&q->lock, flags);
944 }
945
946 static unsigned long local_clock_us(unsigned int *cpu)
947 {
948         unsigned long t;
949
950         /* Cheaply and approximately convert from nanoseconds to microseconds.
951          * The result and subsequent calculations are also defined in the same
952          * approximate microseconds units. The principal source of timing
953          * error here is from the simple truncation.
954          *
955          * Note that local_clock() is only defined wrt the current CPU;
956          * the comparisons are no longer valid if we switch CPUs. Instead of
957          * blocking preemption for the entire busywait, we can detect the CPU
958          * switch and use that as an indicator of system load and a reason to
959          * stop busywaiting, see busywait_stop().
960          */
961         *cpu = get_cpu();
962         t = local_clock() >> 10;
963         put_cpu();
964
965         return t;
966 }
967
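/* Stop the busywait if we have exceeded the caller's timeout, or if we have
 * been migrated to another CPU (which both invalidates the local clock
 * comparison and is a hint of system load).
 */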
968 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
969 {
970         unsigned int this_cpu;
971
972         if (time_after(local_clock_us(&this_cpu), timeout))
973                 return true;
974
975         return this_cpu != cpu;
976 }
977
978 bool __i915_spin_request(const struct drm_i915_gem_request *req,
979                          int state, unsigned long timeout_us)
980 {
981         unsigned int cpu;
982
983         /* When waiting for high frequency requests, e.g. during synchronous
984          * rendering split between the CPU and GPU, the finite amount of time
985          * required to set up the irq and wait upon it limits the response
986          * rate. By busywaiting on the request completion for a short while we
987          * can service the high frequency waits as quickly as possible. However,
988          * if it is a slow request, we want to sleep as quickly as possible.
989          * The tradeoff between waiting and sleeping is roughly the time it
990          * takes to sleep on a request, on the order of a microsecond.
991          */
992
993         timeout_us += local_clock_us(&cpu);
994         do {
995                 if (__i915_gem_request_completed(req))
996                         return true;
997
998                 if (signal_pending_state(state, current))
999                         break;
1000
1001                 if (busywait_stop(timeout_us, cpu))
1002                         break;
1003
1004                 cpu_relax();
1005         } while (!need_resched());
1006
1007         return false;
1008 }
1009
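/* A request cannot be waited upon via its breadcrumb until it has been
 * submitted to hardware and given a global seqno. Sleep on the execute
 * fence until that happens, handling a pending GPU reset ourselves if we
 * hold struct_mutex so that the wait cannot deadlock against a hung GPU.
 */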
1010 static long
1011 __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
1012                                 unsigned int flags,
1013                                 long timeout)
1014 {
1015         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1016                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1017         wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
1018         DEFINE_WAIT(reset);
1019         DEFINE_WAIT(wait);
1020
1021         if (flags & I915_WAIT_LOCKED)
1022                 add_wait_queue(q, &reset);
1023
1024         do {
1025                 prepare_to_wait(&request->execute.wait, &wait, state);
1026
1027                 if (i915_sw_fence_done(&request->execute))
1028                         break;
1029
1030                 if (flags & I915_WAIT_LOCKED &&
1031                     i915_reset_in_progress(&request->i915->gpu_error)) {
1032                         __set_current_state(TASK_RUNNING);
1033                         i915_reset(request->i915);
1034                         reset_wait_queue(q, &reset);
1035                         continue;
1036                 }
1037
1038                 if (signal_pending_state(state, current)) {
1039                         timeout = -ERESTARTSYS;
1040                         break;
1041                 }
1042
1043                 timeout = io_schedule_timeout(timeout);
1044         } while (timeout);
1045         finish_wait(&request->execute.wait, &wait);
1046
1047         if (flags & I915_WAIT_LOCKED)
1048                 remove_wait_queue(q, &reset);
1049
1050         return timeout;
1051 }
1052
1053 /**
1054  * i915_wait_request - wait until execution of request has finished
1055  * @req: the request to wait upon
1056  * @flags: how to wait
1057  * @timeout: how long to wait in jiffies
1058  *
1059  * i915_wait_request() waits for the request to be completed, for a
1060  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1061  * unbounded wait).
1062  *
1063  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1064  * in via the flags, and vice versa if the struct_mutex is not held, the caller
1065  * must not specify that the wait is locked.
1066  *
1067  * Returns the remaining time (in jiffies) if the request completed, which may
1068  * be zero, or -ETIME if the request is still unfinished after the timeout
1069  * expires. May return -ERESTARTSYS if called with I915_WAIT_INTERRUPTIBLE and
1070  * a signal is pending before the request completes.
1071  */
1072 long i915_wait_request(struct drm_i915_gem_request *req,
1073                        unsigned int flags,
1074                        long timeout)
1075 {
1076         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1077                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1078         DEFINE_WAIT(reset);
1079         struct intel_wait wait;
1080
1081         might_sleep();
1082 #if IS_ENABLED(CONFIG_LOCKDEP)
1083         GEM_BUG_ON(debug_locks &&
1084                    !!lockdep_is_held(&req->i915->drm.struct_mutex) !=
1085                    !!(flags & I915_WAIT_LOCKED));
1086 #endif
1087         GEM_BUG_ON(timeout < 0);
1088
1089         if (i915_gem_request_completed(req))
1090                 return timeout;
1091
1092         if (!timeout)
1093                 return -ETIME;
1094
1095         trace_i915_gem_request_wait_begin(req);
1096
1097         if (!i915_sw_fence_done(&req->execute)) {
1098                 timeout = __i915_request_wait_for_execute(req, flags, timeout);
1099                 if (timeout < 0)
1100                         goto complete;
1101
1102                 GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
1103         }
1104         GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
1105         GEM_BUG_ON(!req->global_seqno);
1106
1107         /* Optimistic short spin before touching IRQs */
1108         if (i915_spin_request(req, state, 5))
1109                 goto complete;
1110
1111         set_current_state(state);
1112         if (flags & I915_WAIT_LOCKED)
1113                 add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1114
1115         intel_wait_init(&wait, req->global_seqno);
1116         if (intel_engine_add_wait(req->engine, &wait))
1117                 /* In order to check that we haven't missed the interrupt
1118                  * as we enabled it, we need to kick ourselves to do a
1119                  * coherent check on the seqno before we sleep.
1120                  */
1121                 goto wakeup;
1122
1123         for (;;) {
1124                 if (signal_pending_state(state, current)) {
1125                         timeout = -ERESTARTSYS;
1126                         break;
1127                 }
1128
1129                 if (!timeout) {
1130                         timeout = -ETIME;
1131                         break;
1132                 }
1133
1134                 timeout = io_schedule_timeout(timeout);
1135
1136                 if (intel_wait_complete(&wait))
1137                         break;
1138
1139                 set_current_state(state);
1140
1141 wakeup:
1142                 /* Carefully check if the request is complete, giving time
1143                  * for the seqno to be visible following the interrupt.
1144                  * We also have to check in case we are kicked by the GPU
1145                  * reset in order to drop the struct_mutex.
1146                  */
1147                 if (__i915_request_irq_complete(req))
1148                         break;
1149
1150                 /* If the GPU is hung, and we hold the lock, reset the GPU
1151                  * and then check for completion. On a full reset, the engine's
1152          * HW seqno will be advanced past us and we are complete.
1153                  * If we do a partial reset, we have to wait for the GPU to
1154                  * resume and update the breadcrumb.
1155                  *
1156                  * If we don't hold the mutex, we can just wait for the worker
1157                  * to come along and update the breadcrumb (either directly
1158                  * itself, or indirectly by recovering the GPU).
1159                  */
1160                 if (flags & I915_WAIT_LOCKED &&
1161                     i915_reset_in_progress(&req->i915->gpu_error)) {
1162                         __set_current_state(TASK_RUNNING);
1163                         i915_reset(req->i915);
1164                         reset_wait_queue(&req->i915->gpu_error.wait_queue,
1165                                          &reset);
1166                         continue;
1167                 }
1168
1169                 /* Only spin if we know the GPU is processing this request */
1170                 if (i915_spin_request(req, state, 2))
1171                         break;
1172         }
1173
1174         intel_engine_remove_wait(req->engine, &wait);
1175         if (flags & I915_WAIT_LOCKED)
1176                 remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1177         __set_current_state(TASK_RUNNING);
1178
1179 complete:
1180         trace_i915_gem_request_wait_end(req);
1181
1182         return timeout;
1183 }
1184
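/* Retire completed requests in submission order, stopping at the first
 * request that is still busy; later requests on the same engine timeline
 * cannot have completed before it.
 */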
1185 static void engine_retire_requests(struct intel_engine_cs *engine)
1186 {
1187         struct drm_i915_gem_request *request, *next;
1188
1189         list_for_each_entry_safe(request, next,
1190                                  &engine->timeline->requests, link) {
1191                 if (!__i915_gem_request_completed(request))
1192                         return;
1193
1194                 i915_gem_request_retire(request);
1195         }
1196 }
1197
1198 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
1199 {
1200         struct intel_engine_cs *engine;
1201         enum intel_engine_id id;
1202
1203         lockdep_assert_held(&dev_priv->drm.struct_mutex);
1204
1205         if (!dev_priv->gt.active_requests)
1206                 return;
1207
1208         for_each_engine(engine, dev_priv, id)
1209                 engine_retire_requests(engine);
1210 }