/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>

#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
        struct dma_fence base;

        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
};

static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
        amdgpu_fence_slab = kmem_cache_create(
                "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                SLAB_HWCACHE_ALIGN, NULL);
        if (!amdgpu_fence_slab)
                return -ENOMEM;
        return 0;
}

void amdgpu_fence_slab_fini(void)
{
        rcu_barrier();
        kmem_cache_destroy(amdgpu_fence_slab);
}

/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
        struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

        if (__f->base.ops == &amdgpu_fence_ops ||
            __f->base.ops == &amdgpu_job_fence_ops)
                return __f;

        return NULL;
}

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = atomic_read(&drv->last_seq);

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in
 * @flags: flags to pass into the subordinate .emit_fence() call
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, a negative error code on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
                      unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence;
        struct amdgpu_fence *am_fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;

        if (job == NULL) {
                /* create a separate hw fence */
                am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
                if (am_fence == NULL)
                        return -ENOMEM;
                fence = &am_fence->base;
                am_fence->ring = ring;
        } else {
                /* make use of the job-embedded fence */
                fence = &job->hw_fence;
        }

        seq = ++ring->fence_drv.sync_seq;
        if (job && job->job_run_counter) {
                /* reinit seq for resubmitted jobs */
                fence->seqno = seq;
                /* To be in line with external fence creation and other drivers */
                dma_fence_get(fence);
        } else {
                if (job) {
                        dma_fence_init(fence, &amdgpu_job_fence_ops,
                                       &ring->fence_drv.lock,
                                       adev->fence_context + ring->idx, seq);
                        /* guard against removal in amdgpu_job_{free, free_cb} */
                        dma_fence_get(fence);
                } else {
                        dma_fence_init(fence, &amdgpu_fence_ops,
                                       &ring->fence_drv.lock,
                                       adev->fence_context + ring->idx, seq);
                }
        }

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
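        /* The fence array is a power-of-two ring buffer indexed by the
         * sequence number; if the slot is still occupied, the fence emitted
         * num_fences_mask + 1 submissions ago has not signaled yet and must
         * be waited on before the slot can be reused.
         */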
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        if (unlikely(rcu_dereference_protected(*ptr, 1))) {
                struct dma_fence *old;

                rcu_read_lock();
                old = dma_fence_get_rcu_safe(ptr);
                rcu_read_unlock();

                if (old) {
                        r = dma_fence_wait(old, false);
                        dma_fence_put(old);
                        if (r)
                                return r;
                }
        }

        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
        rcu_assign_pointer(*ptr, dma_fence_get(fence));

        *f = fence;

        return 0;
}

/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout)
{
        uint32_t seq;
        signed long r;

        if (!s)
                return -EINVAL;

        seq = ++ring->fence_drv.sync_seq;
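        /* Bound the number of outstanding fences: the fence emitted
         * num_fences_mask submissions ago must have signaled before a new
         * one may be pushed.
         */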
        r = amdgpu_fence_wait_polling(ring,
                                      seq - ring->fence_drv.num_fences_mask,
                                      timeout);
        if (r < 1)
                return -ETIMEDOUT;

        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, 0);

        *s = seq;

        return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if the fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        struct amdgpu_device *adev = ring->adev;
        uint32_t seq, last_seq;
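
        /* Several callers may race to process fences; the cmpxchg loop
         * ensures last_seq only moves forward and that the thread winning
         * the update owns the (last_seq, seq] window of fences to signal.
         */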
        do {
                last_seq = atomic_read(&ring->fence_drv.last_seq);
                seq = amdgpu_fence_read(ring);

        } while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

        if (del_timer(&ring->fence_drv.fallback_timer) &&
            seq != ring->fence_drv.sync_seq)
                amdgpu_fence_schedule_fallback(ring);

        if (unlikely(seq == last_seq))
                return false;

        last_seq &= drv->num_fences_mask;
        seq &= drv->num_fences_mask;

        do {
                struct dma_fence *fence, **ptr;

                ++last_seq;
                last_seq &= drv->num_fences_mask;
                ptr = &drv->fences[last_seq];

                /* There is always exactly one thread signaling this fence slot */
                fence = rcu_dereference_protected(*ptr, 1);
                RCU_INIT_POINTER(*ptr, NULL);

                if (!fence)
                        continue;

                dma_fence_signal(fence);
                dma_fence_put(fence);
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
                pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
        } while (last_seq != seq);

        return true;
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the pointer to ring structure
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
        struct amdgpu_ring *ring = from_timer(ring, t,
                                              fence_drv.fallback_timer);

        if (amdgpu_fence_process(ring))
                DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
        struct dma_fence *fence, **ptr;
        int r;

        if (!seq)
                return 0;
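
        /* Fences on a ring signal in submission order, so waiting on the
         * most recently emitted fence (the slot indexed by sync_seq)
         * covers everything emitted before it.
         */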
        ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
        rcu_read_lock();
        fence = rcu_dereference(*ptr);
        if (!fence || !dma_fence_get_rcu(fence)) {
                rcu_read_unlock();
                return 0;
        }
        rcu_read_unlock();

        r = dma_fence_wait(fence, false);
        dma_fence_put(fence);
        return r;
}

/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-wait until the requested sequence number has been written (all asics).
 * Returns the remaining timeout if the sequence number was reached,
 * 0 if the wait timed out.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
                                      uint32_t wait_seq,
                                      signed long timeout)
{
        uint32_t seq;
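
        /* The signed difference keeps the comparison correct across 32-bit
         * sequence number wraparound: it tests whether wait_seq is still
         * ahead of the value last written by the GPU.
         */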
        do {
                seq = amdgpu_fence_read(ring);
                udelay(5);
                timeout -= 5;
        } while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

        return timeout > 0 ? timeout : 0;
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
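
        /* Compute (sync_seq - last_seq) modulo 2^32; the 2^32 bias keeps
         * the intermediate 64-bit value positive even when the sequence
         * counters have wrapped.
         */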
        emitted = 0x100000000ull;
        emitted -= atomic_read(&ring->fence_drv.last_seq);
        emitted += READ_ONCE(ring->fence_drv.sync_seq);
        return lower_32_bits(emitted);
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
                ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
                      ring->name, ring->fence_drv.gpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, a negative error code on failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (!adev)
                return -EINVAL;

        if (!is_power_of_2(ring->num_hw_submission))
                return -EINVAL;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
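
        /* Allocate twice as many fence slots as there can be in-flight
         * submissions; num_hw_submission was checked to be a power of two
         * above, so the array size stays a power of two and the index mask
         * below is valid.
         */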
        ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
        spin_lock_init(&ring->fence_drv.lock);
        ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
                                         GFP_KERNEL);

        if (!ring->fence_drv.fences)
                return -ENOMEM;

        return 0;
}

/**
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
        return 0;
}

/**
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(adev_to_drm(adev)))
                        r = amdgpu_fence_wait_empty(ring);
                else
                        r = -ENODEV;
                /* no need to trigger GPU reset as we are unloading */
                if (r)
                        amdgpu_fence_driver_force_completion(ring);

                if (ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);

                del_timer_sync(&ring->fence_drv.fallback_timer);
        }
}

/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
                        continue;

                if (stop)
                        disable_irq(adev->irq.irq);
                else
                        enable_irq(adev->irq.irq);
        }
}

void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
        unsigned int i, j;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                if (!ring->no_scheduler)
                        drm_sched_fini(&ring->sched);

                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
                ring->fence_drv.fences = NULL;
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
 *
 * @ring: ring whose job-embedded fences should be cleared
 *
 */
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
        int i;
        struct dma_fence *old, **ptr;
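
        /* Only job-embedded fences are dropped here: the reference the
         * fence array took at emit time is released without signaling,
         * while standalone fences are left in place.
         */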
        for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
                ptr = &ring->fence_drv.fences[i];
                old = rcu_dereference_protected(*ptr, 1);
                if (old && old->ops == &amdgpu_job_fence_ops) {
                        RCU_INIT_POINTER(*ptr, NULL);
                        dma_fence_put(old);
                }
        }
}

/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be force-signaled
 *
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
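        /* Pretend the hardware has reached the latest emitted sequence
         * number, then run normal fence processing to signal and release
         * every outstanding fence.
         */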
        amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        amdgpu_fence_process(ring);
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
        return (const char *)to_amdgpu_fence(f)->ring->name;
}

static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
{
        struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

        return (const char *)to_amdgpu_ring(job->base.sched)->name;
}

/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called with the fence lock held and arms the
 * fallback timer if it is not already pending, so the fence gets
 * signalled even if the interrupt is lost.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
        if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);

        return true;
}

/**
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * @f: fence
 *
 * This is similar to amdgpu_fence_enable_signaling above; it
 * only handles the job-embedded fence.
 */
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
{
        struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);

        if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));

        return true;
}

/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        /* free the slab entry; this is a separate (non-job) fence */
        kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
}

/**
 * amdgpu_job_fence_free - free up the job with embedded fence
 *
 * @rcu: RCU callback head
 *
 * Free up the job with embedded fence after the RCU grace period.
 */
static void amdgpu_job_fence_free(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);

        /* free the job; the fence is embedded in it */
        kfree(container_of(f, struct amdgpu_job, hw_fence));
}

/**
 * amdgpu_fence_release - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_fence_free);
}

/**
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 *
 * @f: fence
 *
 * This is similar to amdgpu_fence_release above; it
 * only handles the job-embedded fence.
 */
static void amdgpu_job_fence_release(struct dma_fence *f)
{
        call_rcu(&f->rcu, amdgpu_job_fence_free);
}

static const struct dma_fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .release = amdgpu_fence_release,
};

static const struct dma_fence_ops amdgpu_job_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_job_fence_get_timeline_name,
        .enable_signaling = amdgpu_job_fence_enable_signaling,
        .release = amdgpu_job_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence          0x%08x\n",
                           atomic_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted                 0x%08x\n",
                           ring->fence_drv.sync_seq);

                if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
                    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
                        seq_printf(m, "Last signaled trailing fence 0x%08x\n",
                                   le32_to_cpu(*ring->trail_fence_cpu_addr));
                        seq_printf(m, "Last emitted                 0x%08x\n",
                                   ring->trail_seq);
                }

                if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
                        continue;

                /* set in CP_VMID_PREEMPT and preemption occurred */
                seq_printf(m, "Last preempted               0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
                /* set in CP_VMID_RESET and reset occurred */
                seq_printf(m, "Last reset                   0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
                /* Both preemption and reset occurred */
                seq_printf(m, "Last both                    0x%08x\n",
                           le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
        }
        return 0;
}

/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a gpu reset and wait for its result.
 */
static int gpu_recover_get(void *data, u64 *val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        struct drm_device *dev = adev_to_drm(adev);
        int r;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
                pm_runtime_put_autosuspend(dev->dev);
                return 0;
        }

        if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
                flush_work(&adev->reset_work);

        *val = atomic_read(&adev->reset_domain->reset_res);

        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
                         "%lld\n");

static void amdgpu_debugfs_reset_work(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  reset_work);
        struct amdgpu_reset_context reset_context;

        memset(&reset_context, 0, sizeof(reset_context));

        reset_context.method = AMD_RESET_METHOD_NONE;
        reset_context.reset_req_dev = adev;
        set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

        amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}

#endif

void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;

        debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
                            &amdgpu_debugfs_fence_info_fops);

        if (!amdgpu_sriov_vf(adev)) {
                INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
                debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
                                    &amdgpu_debugfs_gpu_recover_fops);
        }
#endif
}