/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <[email protected]>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;

        if (drv->cpu_addr)
                *drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
        struct amdgpu_fence_driver *drv = &ring->fence_drv;
        u32 seq = 0;

        if (drv->cpu_addr)
                seq = le32_to_cpu(*drv->cpu_addr);
        else
                seq = lower_32_bits(atomic64_read(&drv->last_seq));

        return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
                      struct amdgpu_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;

        /* we are protected by the ring emission mutex */
        *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
        if ((*fence) == NULL) {
                return -ENOMEM;
        }
        (*fence)->seq = ++ring->fence_drv.sync_seq;
        (*fence)->ring = ring;
        (*fence)->owner = owner;
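        /* fences on the same ring share one timeline: use the per-ring
         * fence context and the ring's own sequence number
         */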
        fence_init(&(*fence)->base, &amdgpu_fence_ops,
                &ring->fence_drv.fence_queue.lock,
                adev->fence_context + ring->idx,
                (*fence)->seq);
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               (*fence)->seq,
                               AMDGPU_FENCE_FLAG_INT);
        return 0;
}

/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
        mod_timer(&ring->fence_drv.fallback_timer,
                  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring and the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
        uint64_t seq, last_seq, last_emitted;
        unsigned count_loop = 0;
        bool wake = false;

        /* Note there is a scenario here for an infinite loop but it's
         * very unlikely to happen. For it to happen, the current polling
         * process needs to be interrupted by another process, and that
         * other process needs to update last_seq between the atomic read
         * and the xchg of the current process.
         *
         * Moreover, for this to turn into an infinite loop there needs to
         * be a continuous stream of newly signaled fences, i.e.
         * amdgpu_fence_read needs to return a different value each time
         * for both the currently polling process and the other process
         * that updates last_seq between the atomic read and xchg of the
         * current process. And the value the other process sets as the
         * last seq must be higher than the seq value we just read, which
         * means the current process needs to be interrupted after
         * amdgpu_fence_read and before the atomic xchg.
         *
         * To be even more safe we count the number of times we loop and
         * bail after 10 loops, just accepting the fact that we might have
         * temporarily set last_seq not to the true last signaled seq but
         * to an older one.
         */
        last_seq = atomic64_read(&ring->fence_drv.last_seq);
        do {
                last_emitted = ring->fence_drv.sync_seq;
                seq = amdgpu_fence_read(ring);
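                /* the hardware stores only the low 32 bits of the sequence;
                 * extend it to 64 bits with the upper bits of the last seen
                 * value, and take the upper bits from the last emitted value
                 * instead when a 32-bit wrap-around is detected
                 */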
                seq |= last_seq & 0xffffffff00000000LL;
                if (seq < last_seq) {
                        seq &= 0xffffffff;
                        seq |= last_emitted & 0xffffffff00000000LL;
                }

                if (seq <= last_seq || seq > last_emitted) {
                        break;
                }
                /* If we loop over we don't want to return without
                 * checking if a fence is signaled, as it means that the
                 * seq we just read is different from the previous one.
                 */
                wake = true;
                last_seq = seq;
                if ((count_loop++) > 10) {
                        /* We looped too many times; leave with the
                         * fact that we might have set an older fence
                         * seq than the current real last seq as signaled
                         * by the hw.
                         */
                        break;
                }
        } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

        if (seq < last_emitted)
                amdgpu_fence_schedule_fallback(ring);

        return wake;
}

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void amdgpu_fence_process(struct amdgpu_ring *ring)
{
        if (amdgpu_fence_activity(ring))
                wake_up_all(&ring->fence_drv.fence_queue);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: pointer to the ring, passed as the timer callback argument
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
        struct amdgpu_ring *ring = (void *)arg;

        amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_ring_wait_seq().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        /* poll new last sequence at least once */
        amdgpu_fence_process(ring);
        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return true;

        return false;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for a seq of a specific ring to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * Returns:
 * 0: seq signaled, and GPU not hung
 * -EINVAL: the requested seq has not been emitted on this ring
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
        BUG_ON(!ring);
        if (seq > ring->fence_drv.sync_seq)
                return -EINVAL;

        if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
                return 0;

        amdgpu_fence_schedule_fallback(ring);
        wait_event(ring->fence_drv.fence_queue,
                   amdgpu_fence_seq_signaled(ring, seq));

        return 0;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on for the next fence
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
        uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

        if (seq >= ring->fence_drv.sync_seq)
                return -ENOENT;

        return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on for all fences
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
        uint64_t seq = ring->fence_drv.sync_seq;

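        /* nothing has been emitted on this ring yet */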
        if (!seq)
                return 0;

        return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
        uint64_t emitted;

        /* We are not protected by ring lock when reading the last sequence
         * but it's ok to report slightly wrong fence count here.
         */
        amdgpu_fence_process(ring);
        emitted = ring->fence_drv.sync_seq
                - atomic64_read(&ring->fence_drv.last_seq);
        /* to avoid a 32-bit wrap-around */
        if (emitted > 0x10000000)
                emitted = 0x10000000;

        return (unsigned)emitted;
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type)
{
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;

        if (ring != &adev->uvd.ring) {
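                /* fence_offs indexes 32-bit slots in the writeback page,
                 * so scale it by 4 to get the GPU byte offset
                 */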
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
                /* put fence directly behind firmware */
                index = ALIGN(adev->uvd.fw->size, 8);
                ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
                ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
        }
        amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
        amdgpu_irq_get(adev, irq_src, irq_type);

        ring->fence_drv.irq_src = irq_src;
        ring->fence_drv.irq_type = irq_type;
        ring->fence_drv.initialized = true;

        dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
                 "cpu addr 0x%p\n", ring->idx,
                 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
        return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 on success, error on failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
        long timeout;
        int r;

        ring->fence_drv.cpu_addr = NULL;
        ring->fence_drv.gpu_addr = 0;
        ring->fence_drv.sync_seq = 0;
        atomic64_set(&ring->fence_drv.last_seq, 0);
        ring->fence_drv.initialized = false;

        setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
                    (unsigned long)ring);

        init_waitqueue_head(&ring->fence_drv.fence_queue);

        timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
        if (timeout == 0) {
                /*
                 * FIXME:
                 * Delayed workqueue cannot use it directly,
                 * so the scheduler will not use delayed workqueue if
                 * MAX_SCHEDULE_TIMEOUT is set.
                 * Currently keep it simple and silly.
                 */
                timeout = MAX_SCHEDULE_TIMEOUT;
        }
        r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
                           amdgpu_sched_hw_submission,
                           timeout, ring->name);
        if (r) {
                DRM_ERROR("Failed to create scheduler on ring %s.\n",
                          ring->name);
                return r;
        }

        return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
        if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
                amdgpu_fence_slab = kmem_cache_create(
                        "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!amdgpu_fence_slab)
                        return -ENOMEM;
        }
        if (amdgpu_debugfs_fence_init(adev))
                dev_err(adev->dev, "fence debugfs file creation failed\n");

        return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
        int i, r;

        if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
                kmem_cache_destroy(amdgpu_fence_slab);
        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (!ring || !ring->fence_drv.initialized)
                        continue;
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* no need to trigger GPU reset as we are unloading */
                        amdgpu_fence_driver_force_completion(adev);
                }
                wake_up_all(&ring->fence_drv.fence_queue);
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
                amd_sched_fini(&ring->sched);
                del_timer_sync(&ring->fence_drv.fallback_timer);
                ring->fence_drv.initialized = false;
        }
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        amdgpu_fence_driver_force_completion(adev);
                }

                /* disable the interrupt */
                amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * re-enable the fence interrupts on the rings it has.
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                /* enable the interrupt */
                amdgpu_irq_get(adev, ring->fence_drv.irq_src,
                               ring->fence_drv.irq_type);
        }
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

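                /* report the last emitted sequence as already signaled so
                 * that every waiter on this ring is released
                 */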
                amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
        }
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
        return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Test whether the fence sequence number has already signaled. If it hasn't,
 * start fence processing and check again. Returns true if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        amdgpu_fence_process(ring);

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return true;

        return false;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also used
 * for the fence locking itself, so unlocked variants are used for
 * fence_signal and remove_wait_queue.
 */
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
{
        struct amdgpu_fence *fence;
        struct amdgpu_device *adev;
        u64 seq;
        int ret;

        fence = container_of(wait, struct amdgpu_fence, fence_wake);
        adev = fence->ring->adev;

        /*
         * We cannot use amdgpu_fence_process here because we're already
         * in the waitqueue, in a call from wake_up_all.
         */
        seq = atomic64_read(&fence->ring->fence_drv.last_seq);
        if (seq >= fence->seq) {
                ret = fence_signal_locked(&fence->base);
                if (!ret)
                        FENCE_TRACE(&fence->base, "signaled from irq context\n");
                else
                        FENCE_TRACE(&fence->base, "was already signaled\n");

                __remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
                fence_put(&fence->base);
        } else
                FENCE_TRACE(&fence->base, "pending\n");
        return 0;
}

/**
 * amdgpu_fence_enable_signaling - enable signaling on a fence
 * @f: fence
 *
 * This function is called with the fence_queue lock held, and adds a callback
 * to fence_queue that checks if this fence is signaled, and if so it
 * signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        struct amdgpu_ring *ring = fence->ring;

        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
                return false;

        fence->fence_wake.flags = 0;
        fence->fence_wake.private = NULL;
        fence->fence_wake.func = amdgpu_fence_check_signaled;
        __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
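        /* the wait queue entry holds a reference to the fence; it is
         * dropped in amdgpu_fence_check_signaled()
         */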
        fence_get(f);
        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);
        FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
        return true;
}

static void amdgpu_fence_release(struct fence *f)
{
        struct amdgpu_fence *fence = to_amdgpu_fence(f);
        kmem_cache_free(amdgpu_fence_slab, fence);
}

const struct fence_ops amdgpu_fence_ops = {
        .get_driver_name = amdgpu_fence_get_driver_name,
        .get_timeline_name = amdgpu_fence_get_timeline_name,
        .enable_signaling = amdgpu_fence_enable_signaling,
        .signaled = amdgpu_fence_is_signaled,
        .wait = fence_default_wait,
        .release = amdgpu_fence_release,
};

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
        int i;

        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring || !ring->fence_drv.initialized)
                        continue;

                amdgpu_fence_process(ring);

                seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
                seq_printf(m, "Last signaled fence 0x%016llx\n",
                           (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
                seq_printf(m, "Last emitted        0x%016llx\n",
                           ring->fence_drv.sync_seq);
        }
        return 0;
}

/**
 * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
 *
 * Manually trigger a gpu reset.
 */
static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;

        seq_printf(m, "gpu reset\n");
        amdgpu_gpu_reset(adev);

        return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
        {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
        {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
        return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
        return 0;
#endif
}
