 * Copyright 2009 Jerome Glisse.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <drm/drm_drv.h>
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
	struct dma_fence base;
	struct amdgpu_ring *ring;
	ktime_t start_timestamp;
static struct kmem_cache *amdgpu_fence_slab;
int amdgpu_fence_slab_init(void)
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
void amdgpu_fence_slab_fini(void)
	kmem_cache_destroy(amdgpu_fence_slab);
static const struct dma_fence_ops amdgpu_fence_ops;
static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
	if (__f->base.ops == &amdgpu_fence_ops ||
	    __f->base.ops == &amdgpu_job_fence_ops)
 * amdgpu_fence_write - write a fence value
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 * Writes a fence value to memory (all asics).
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
		*drv->cpu_addr = cpu_to_le32(seq);
 * amdgpu_fence_read - read a fence value
 * @ring: ring the fence is associated with
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
		seq = le32_to_cpu(*drv->cpu_addr);
		seq = atomic_read(&drv->last_seq);
 * amdgpu_fence_emit - emit a fence on the requested ring
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @job: job the fence is embedded in
 * @flags: flags to pass into the subordinate .emit_fence() call
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence;
	struct amdgpu_fence *am_fence;
	struct dma_fence __rcu **ptr;
		/* create a separate hw fence */
		am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
		if (am_fence == NULL)
		fence = &am_fence->base;
		am_fence->ring = ring;
		/* make use of the job-embedded fence */
		fence = &job->hw_fence;
	seq = ++ring->fence_drv.sync_seq;
	if (job && job->job_run_counter) {
		/* reinit seq for resubmitted jobs */
		/* To be in line with external fence creation and other drivers */
		dma_fence_get(fence);
		dma_fence_init(fence, &amdgpu_job_fence_ops,
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
		/* Guard against removal in amdgpu_job_{free, free_cb} */
		dma_fence_get(fence);
		dma_fence_init(fence, &amdgpu_fence_ops,
			       &ring->fence_drv.lock,
			       adev->fence_context + ring->idx, seq);
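	/* Emit the fence packet on the ring; AMDGPU_FENCE_FLAG_INT asks the
	 * hardware to raise an interrupt once the fence value is written back.
	 */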
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);
	pm_runtime_get_noresume(adev_to_drm(adev)->dev);
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
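	/* The slot for this sequence number may still hold a fence from a
	 * previous wrap of the ring; if so, wait for that older fence to
	 * signal before reusing the slot.
	 */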
	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
		struct dma_fence *old;
		old = dma_fence_get_rcu_safe(ptr);
		r = dma_fence_wait(old, false);
	to_amdgpu_fence(fence)->start_timestamp = ktime_get();
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	rcu_assign_pointer(*ptr, dma_fence_get(fence));
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 * @timeout: the timeout for waiting in usecs
 * Emits a fence command on the requested ring (all asics).
 * Used for fence polling.
 * Returns 0 on success, -ENOMEM on failure.
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 * amdgpu_fence_schedule_fallback - schedule fallback check
 * @ring: pointer to struct amdgpu_ring
 * Start a timer as fallback to our interrupts.
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
 * amdgpu_fence_process - check for fence activity
 * @ring: pointer to struct amdgpu_ring
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 * Returns true if fence was processed
bool amdgpu_fence_process(struct amdgpu_ring *ring)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct amdgpu_device *adev = ring->adev;
	uint32_t seq, last_seq;
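	/* Read the fence value written by the GPU and atomically advance
	 * last_seq to it; retry if another thread updated last_seq in the
	 * meantime.
	 */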
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);
	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);
	if (unlikely(seq == last_seq))
	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;
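	/* Walk every slot between the previously processed sequence number
	 * and the one just read, signaling and dropping each fence found.
	 */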
		struct dma_fence *fence, **ptr;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];
		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);
		dma_fence_signal(fence);
		dma_fence_put(fence);
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	} while (last_seq != seq);
 * amdgpu_fence_fallback - fallback for hardware interrupts
 * @t: timer context used to obtain the pointer to ring structure
 * Checks for fence activity.
static void amdgpu_fence_fallback(struct timer_list *t)
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);
	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
 * amdgpu_fence_wait_empty - wait for all fences to signal
 * @ring: ring the fence is associated with
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
 * amdgpu_fence_wait_polling - busy wait for given sequence number
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 * Busy waits until the given sequence number has signaled on the requested
 * ring (all asics).
 * Returns the remaining time if the wait succeeded, 0 or a negative value
 * if it timed out.
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
	while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
	return timeout > 0 ? timeout : 0;
 * amdgpu_fence_count_emitted - get the count of emitted fences
 * @ring: ring the fence is associated with
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring. Used by the
 * dynpm code to track ring activity.
unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
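	 * Starting from 0x100000000ull keeps the unsigned arithmetic below
	 * positive even when last_seq has wrapped past sync_seq; the low
	 * 32 bits then give the emitted-but-unsignaled count.
	 */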
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
 * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
 * @ring: ring the fence is associated with
 * Find the earliest fence that has not yet signaled and calculate the time
 * delta between when it was emitted and now.
u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;
	uint32_t last_seq, sync_seq;
	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
	if (last_seq == sync_seq)
	last_seq &= drv->num_fences_mask;
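	/* Look up the earliest fence that has not yet signaled; an empty
	 * slot means it already completed and there is nothing to measure.
	 */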
	fence = drv->fences[last_seq];
	return ktime_us_delta(ktime_get(),
		to_amdgpu_fence(fence)->start_timestamp);
 * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
 * @ring: ring the fence is associated with
 * @seq: the fence seq number to update.
 * @timestamp: the start timestamp to update.
 * This function is called when the fence and its related ib are about to be
 * resubmitted to the GPU in an MCBP scenario. Thus we do not consider races
 * with amdgpu_fence_process modifying the same fence.
void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct dma_fence *fence;
	seq &= drv->num_fences_mask;
	fence = drv->fences[seq];
	to_amdgpu_fence(fence)->start_timestamp = timestamp;
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned int irq_type)
	struct amdgpu_device *adev = ring->adev;
	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
		ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
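	/* Seed the fence location with the last signaled sequence number so
	 * a stale value left in memory is not mistaken for new fence activity.
	 */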
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;
	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
		      ring->name, ring->fence_drv.gpu_addr);
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 * @ring: ring to init the fence driver on
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	if (!is_power_of_2(ring->num_hw_submission))
	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;
	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
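	/* The fence array holds twice the number of hardware submissions
	 * (a power of two, checked above), so "seq & num_fences_mask" maps
	 * any 32-bit sequence number onto a valid slot.
	 */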
	ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
	if (!ring->fence_drv.fences)
 * amdgpu_fence_driver_sw_init - init the fence driver
 * for all possible rings.
 * @adev: amdgpu device pointer
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
 * fence driver interrupts need to be restored.
 * @ring: ring to be checked
 * Interrupts for rings that belong to GFX IP don't need to be restored
 * when the target power state is s0ix.
 * Return true if interrupts need to be restored, false otherwise.
static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	bool is_gfx_power_domain = false;
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_SDMA:
		/* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
			is_gfx_power_domain = true;
	case AMDGPU_RING_TYPE_GFX:
	case AMDGPU_RING_TYPE_COMPUTE:
	case AMDGPU_RING_TYPE_KIQ:
	case AMDGPU_RING_TYPE_MES:
		is_gfx_power_domain = true;
	return !(adev->in_s0ix && is_gfx_power_domain);
 * amdgpu_fence_driver_hw_fini - tear down the fence driver
 * for all possible rings.
 * @adev: amdgpu device pointer
 * Tear down the fence driver for all possible rings (all asics).
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
		/* You can't wait for HW to signal if it's gone */
		if (!drm_dev_is_unplugged(adev_to_drm(adev)))
			r = amdgpu_fence_wait_empty(ring);
		/* no need to trigger GPU reset as we are unloading */
		amdgpu_fence_driver_force_completion(ring);
		if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
		    ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_put(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
		del_timer_sync(&ring->fence_drv.fallback_timer);
/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
			disable_irq(adev->irq.irq);
			enable_irq(adev->irq.irq);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
		 * Notice we check for sched.ops since there's some
		 * override on the meaning of sched.ready by amdgpu.
		 * The natural check would be sched.ready, which is
		 * set as drm_sched_init() finishes...
			drm_sched_fini(&ring->sched);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
 * amdgpu_fence_driver_hw_init - enable the fence driver
 * for all possible rings.
 * @adev: amdgpu device pointer
 * Enable the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
		/* enable the interrupt */
		if (ring->fence_drv.irq_src &&
		    amdgpu_fence_need_ring_interrupt_restore(ring))
			amdgpu_irq_get(adev, ring->fence_drv.irq_src,
				       ring->fence_drv.irq_type);
 * amdgpu_fence_driver_clear_job_fences - clear job-embedded fences of a ring
 * @ring: ring whose job-embedded fences will be cleared
void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
	struct dma_fence *old, **ptr;
	for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
		ptr = &ring->fence_drv.fences[i];
		old = rcu_dereference_protected(*ptr, 1);
		if (old && old->ops == &amdgpu_job_fence_ops) {
			struct amdgpu_job *job;
			/* For a non-scheduler bad job, i.e. a failed ib test, we need to signal
			 * it right here or we won't be able to track it in fence_drv
			 * and it will remain unsignaled during sa_bo free.
			job = container_of(old, struct amdgpu_job, hw_fence);
			if (!job->base.s_fence && !dma_fence_is_signaled(old))
				dma_fence_signal(old);
			RCU_INIT_POINTER(*ptr, NULL);
 * amdgpu_fence_driver_set_error - set error code on fences
 * @ring: the ring which contains the fences
 * @error: the error code to set
 * Set an error code to all the fences pending on the ring.
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	spin_lock_irqsave(&drv->lock, flags);
	for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
		struct dma_fence *fence;
		fence = rcu_dereference_protected(drv->fences[i],
						  lockdep_is_held(&drv->lock));
		if (fence && !dma_fence_is_signaled_locked(fence))
			dma_fence_set_error(fence, error);
	spin_unlock_irqrestore(&drv->lock, flags);
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 * @ring: ring whose latest fence should be signaled
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
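	/* Tag all pending fences with -ECANCELED, then write the latest
	 * emitted sequence number and process the ring so every outstanding
	 * fence is treated as signaled.
	 */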
	amdgpu_fence_driver_set_error(ring, -ECANCELED);
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
 * Common fence implementation
static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
	return (const char *)to_amdgpu_fence(f)->ring->name;
static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
	return (const char *)to_amdgpu_ring(job->base.sched)->name;
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * This function is called with fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
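	/* No per-fence interrupt callback is registered here; instead, arm
	 * the fallback timer (if it is not already pending) so the fence is
	 * still processed even if the fence interrupt is missed.
	 */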
	if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
 * amdgpu_job_fence_enable_signaling - enable signalling on job fence
 * This is similar to amdgpu_fence_enable_signaling above; it
 * only handles the job-embedded fence.
static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
	struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
	if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
 * amdgpu_fence_free - free up the fence memory
 * @rcu: RCU callback head
 * Free up the fence memory after the RCU grace period.
static void amdgpu_fence_free(struct rcu_head *rcu)
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	/* free the fence slab entry if it's a separate fence */
	kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
 * amdgpu_job_fence_free - free up the job with embedded fence
 * @rcu: RCU callback head
 * Free up the job with embedded fence after the RCU grace period.
static void amdgpu_job_fence_free(struct rcu_head *rcu)
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	/* free the job the fence is embedded in */
	kfree(container_of(f, struct amdgpu_job, hw_fence));
 * amdgpu_fence_release - callback that fence can be freed
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
static void amdgpu_fence_release(struct dma_fence *f)
	call_rcu(&f->rcu, amdgpu_fence_free);
 * amdgpu_job_fence_release - callback that job embedded fence can be freed
 * This is similar to amdgpu_fence_release above; it
 * only handles the job-embedded fence.
static void amdgpu_job_fence_release(struct dma_fence *f)
	call_rcu(&f->rcu, amdgpu_job_fence_free);
static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
static const struct dma_fence_ops amdgpu_job_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_job_fence_get_timeline_name,
	.enable_signaling = amdgpu_job_fence_enable_signaling,
	.release = amdgpu_job_fence_release,
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
	struct amdgpu_device *adev = m->private;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
		amdgpu_fence_process(ring);
		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);
		if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
		    ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
			seq_printf(m, "Last signaled trailing fence 0x%08x\n",
				   le32_to_cpu(*ring->trail_fence_cpu_addr));
			seq_printf(m, "Last emitted 0x%08x\n",
		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 * Manually trigger a gpu reset at the next fence wait.
static int gpu_recover_get(void *data, u64 *val)
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	struct drm_device *dev = adev_to_drm(adev);
	r = pm_runtime_get_sync(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
		flush_work(&adev->reset_work);
	*val = atomic_read(&adev->reset_domain->reset_res);
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
static void amdgpu_debugfs_reset_work(struct work_struct *work)
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
	struct amdgpu_reset_context reset_context;
	memset(&reset_context, 0, sizeof(reset_context));
	reset_context.method = AMD_RESET_METHOD_NONE;
	reset_context.reset_req_dev = adev;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
	amdgpu_device_gpu_recover(adev, NULL, &reset_context);
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;
	debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
			    &amdgpu_debugfs_fence_info_fops);
	if (!amdgpu_sriov_vf(adev)) {
		INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
		debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
				    &amdgpu_debugfs_gpu_recover_fops);