linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
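
/*
 * Illustrative sketch only (not part of the driver): the typical fence
 * lifecycle as seen by a caller, assuming a ring whose fence driver has
 * already been started.  Ring locking and error paths are elided and the
 * function name is hypothetical.
 */
#if 0
static int example_fence_lifecycle(struct amdgpu_ring *ring)
{
        struct dma_fence *fence;
        int r;

        /* Emit a fence command; 'fence' will signal once the GPU has
         * processed everything submitted on the ring before it. */
        r = amdgpu_fence_emit(ring, &fence, 0);
        if (r)
                return r;

        /* Block until the GPU writes the seqno back and either the
         * fence interrupt or the fallback timer signals the fence. */
        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}
#endif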

struct amdgpu_fence {
        struct dma_fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        rcu_barrier();
        kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
                      unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_fence *fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;

        fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if (fence == NULL)
                return -ENOMEM;

        seq = ++ring->fence_drv.sync_seq;
        fence->ring = ring;
        dma_fence_init(&fence->base, &amdgpu_fence_ops,
                       &ring->fence_drv.lock,
                       adev->fence_context + ring->idx,
                       seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
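        /* Each seqno maps to a fixed slot in a power-of-two ring of
         * fence pointers.  If the slot is still occupied, the fence
         * emitted num_fences_mask + 1 submissions ago has not signaled
         * yet, so wait for it before reusing the slot.
         */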
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        r = dma_fence_wait(old, false);
                        dma_fence_put(old);
                        if (r)
                                return r;
                }
        }

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

        *f = &fence->base;

        return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fences that are polled rather than interrupt driven.
 * Returns 0 on success, -EINVAL if no sequence pointer is given,
 * or -ETIMEDOUT if draining older fences timed out.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout)
{
        uint32_t seq;
        signed long r;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
        r = amdgpu_fence_wait_polling(ring,
                                      seq - ring->fence_drv.num_fences_mask,
                                      timeout);
        if (r < 1)
                return -ETIMEDOUT;

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}
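
/*
 * Illustrative sketch only: emitting and waiting on a polled fence for
 * contexts where fence interrupts are unavailable (the KIQ register
 * access path uses this pattern).  The function name and the timeout
 * values (in microseconds) are hypothetical.
 */
#if 0
static int example_polled_fence(struct amdgpu_ring *ring)
{
        uint32_t seq;
        int r;

        /* Reserve a seqno and emit the fence without requesting an
         * interrupt; may busy-wait for older fences to drain first. */
        r = amdgpu_fence_emit_polling(ring, &seq, 1000);
        if (r)
                return r;

        /* Busy-wait up to 1 ms for the GPU to write the seqno back;
         * amdgpu_fence_wait_polling() returns 0 on timeout. */
        if (!amdgpu_fence_wait_polling(ring, seq, 1000))
                return -ETIMEDOUT;

        return 0;
}
#endif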

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as a fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if the fence was processed.
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;
        int r;

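        /* Publish the newest seqno read back from memory.  The cmpxchg
         * loop lets concurrent callers (irq handler, fallback timer,
         * explicit processing) race safely: last_seq only ever moves
         * forward, and whoever wins the update signals the fences.
         */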
        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (del_timer(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                r = dma_fence_signal(fence);
                if (!r)
                        DMA_FENCE_TRACE(fence, "signaled from irq context\n");
                else
                        BUG();

                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);

        return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = from_timer(ring, t,
                                              fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;

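        /* Look up the most recently emitted fence under RCU; it may be
         * freed concurrently, so dma_fence_get_rcu() has to validate
         * that the reference is still live before we rely on it.
         */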
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until @wait_seq has signaled on the requested ring (all asics).
 * Returns the remaining timeout if the sequence number arrived in time,
 * or 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        uint32_t seq;

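        /* The (int32_t) cast below makes the comparison wraparound-safe:
         * seqnos are compared as serial numbers, so the loop keeps
         * spinning only while wait_seq is still "ahead of" what the GPU
         * has written back, modulo 2^32.
         */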
        do {
                seq = amdgpu_fence_read(ring);
                udelay(5);
                timeout -= 5;
        } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

        return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
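        /* Bias by 2^32 so the unsigned arithmetic cannot underflow while
         * last_seq and sync_seq race across a 32-bit wrap; the low 32
         * bits are the emitted-but-not-yet-signaled count.
         */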
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

        if (irq_src)
                amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
                      ring->name, ring->fence_drv.gpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission,
                                  atomic_t *sched_score)
{
        struct amdgpu_device *adev = ring->adev;
        long timeout;
        int r;

        if (!adev)
                return -EINVAL;

        if (!is_power_of_2(num_hw_submission))
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

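        /* The fence slot array holds num_hw_submission * 2 entries and is
         * a power of two, so a seqno maps to its slot with a simple mask
         * and amdgpu_fence_emit() reuses a slot only after the fence
         * emitted num_hw_submission * 2 seqnos earlier has signaled.
         */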
        ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);
        if (!ring->fence_drv.fences)
                return -ENOMEM;

        /* No need to setup the GPU scheduler for rings that don't need it */
        if (ring->no_scheduler)
                return 0;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_GFX:
                timeout = adev->gfx_timeout;
                break;
        case AMDGPU_RING_TYPE_COMPUTE:
                timeout = adev->compute_timeout;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                timeout = adev->sdma_timeout;
                break;
        default:
                timeout = adev->video_timeout;
                break;
        }

        r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                           num_hw_submission, amdgpu_job_hang_limit,
                           timeout, sched_score, ring->name);
        if (r) {
                DRM_ERROR("Failed to create scheduler on ring %s.\n",
                          ring->name);
                return r;
        }

        return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        unsigned i, j;
        int r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                if (!ring->no_scheduler)
                        drm_sched_fini(&ring->sched);
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(ring);
                }
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);

                del_timer_sync(&ring->fence_drv.fallback_timer);
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(ring);
                }

                /* disable the interrupt */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring to force-signal the latest fence on
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);

        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);

        DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct amdgpu_fence *fence = to_amdgpu_fence(f);

        kmem_cache_free(amdgpu_fence_slab, fence);
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence          0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted                 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted                 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted               0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset                   0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both                    0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset at the next fence wait.
 */
static int gpu_recover_get(void *data, u64 *val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        struct drm_device *dev = adev_to_drm(adev);
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        *val = amdgpu_device_gpu_recover(adev, NULL);

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
                         "%lld\n");

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
                            &amdgpu_debugfs_fence_info_fops);

        if (!amdgpu_sriov_vf(adev))
                debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
                                    &amdgpu_debugfs_gpu_recover_fops);
#endif
}