/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

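/**
 * amdgpu_vcn_sw_init - software init of the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Load and validate the VCN firmware, allocate the VCPU buffer object that
 * backs the firmware image, stack and heap, and set up the scheduler
 * entities for the decode and encode rings.
 */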
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct drm_sched_rq *rq;
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned version_major, version_minor, family_id;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
        DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                 version_major, version_minor, family_id);

        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
                  + AMDGPU_VCN_SESSION_SIZE * 40;
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        ring = &adev->vcn.ring_dec;
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
        r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
                                  rq, amdgpu_sched_jobs, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN dec run queue.\n");
                return r;
        }

        ring = &adev->vcn.ring_enc[0];
        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
        r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
                                  rq, amdgpu_sched_jobs, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCN enc run queue.\n");
                return r;
        }

        return 0;
}

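/**
 * amdgpu_vcn_sw_fini - software teardown of the VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the scheduler entities, free the VCPU buffer object, finish
 * the rings and release the firmware.
 */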
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i;

        kfree(adev->vcn.saved_bo);

        drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

        drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

        release_firmware(adev->vcn.fw);

        return 0;
}

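/**
 * amdgpu_vcn_suspend - save VCN VCPU state for suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work and copy the contents of the VCPU buffer
 * object to system memory so they can be restored on resume.
 */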
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

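/**
 * amdgpu_vcn_resume - restore VCN VCPU state on resume
 *
 * @adev: amdgpu_device pointer
 *
 * Copy back the saved VCPU buffer object contents, or, if nothing was
 * saved, reload the firmware image and clear the remainder of the buffer.
 */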
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                            le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                memset_io(ptr, 0, size);
        }

        return 0;
}

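/**
 * amdgpu_vcn_idle_work_handler - delayed work to power down an idle VCN block
 *
 * @work: the embedded delayed work item
 *
 * Reschedules itself while decode fences are still outstanding; once the
 * ring is idle, this is the hook where the block would be powered down
 * (the DPM call below is stubbed out until PG/CG support lands).
 */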
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

        if (fences == 0) {
                if (adev->pm.dpm_enabled) {
                        /* might be used when with pg/cg
                        amdgpu_dpm_enable_uvd(adev, false);
                        */
                }
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

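/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 *
 * @ring: the VCN ring about to be used
 *
 * Cancels the pending idle work; if the work was no longer pending, the
 * block may have been powered down, so this is where the clocks would be
 * brought back up (stubbed out until PG/CG support lands).
 */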
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks && adev->pm.dpm_enabled) {
                /* might be used when with pg/cg
                amdgpu_dpm_enable_uvd(adev, true);
                */
        }
}

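/**
 * amdgpu_vcn_ring_end_use - schedule the idle work after ring use
 *
 * @ring: the VCN ring that was used
 */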
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

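/**
 * amdgpu_vcn_dec_ring_test_ring - register write test
 *
 * @ring: the decode ring to test
 *
 * Write a token to the UVD context ID register through the ring and poll
 * the register until the token appears.
 */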
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: vcn dec failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

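/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message to the ring
 *
 * @ring: decode ring the message is submitted to
 * @bo: buffer object containing the message
 * @direct: submit directly to the ring or through the scheduler entity
 * @fence: optional fence to wait on for message processing
 *
 * Writes an IB that points the VCPU at the message BO and submits it.
 * The reference to @bo is consumed on both success and failure.
 */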
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                                   bool direct, struct dma_fence **fence)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        struct amdgpu_device *adev = ring->adev;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
        if (r) {
                /* we own the BO reference, don't leak it on failure */
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (r)
                goto err;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        if (direct) {
                r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
                job->fence = dma_fence_get(f);
                if (r)
                        goto err_free;

                amdgpu_job_free(job);
        } else {
                r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
        }

        ttm_eu_fence_buffer_objects(&ticket, &head, f);

        if (fence)
                *fence = dma_fence_get(f);
        amdgpu_bo_unref(&bo);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        ttm_eu_backoff_reservation(&ticket, &head);
        /* drop the BO reference on the error paths as well */
        amdgpu_bo_unref(&bo);
        return r;
}

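/**
 * amdgpu_vcn_dec_get_create_msg - generate a decoder create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to wait on for msg processing
 *
 * Open up a stream for HW test.
 */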
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, 0, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}

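/**
 * amdgpu_vcn_dec_get_destroy_msg - generate a decoder destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @direct: submit directly to the ring or through the scheduler entity
 * @fence: optional fence to wait on for msg processing
 *
 * Close up a stream for HW test.
 */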
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                             AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
                             NULL, NULL, 0, &bo);
        if (r)
                return r;

        r = amdgpu_bo_reserve(bo, false);
        if (r) {
                amdgpu_bo_unref(&bo);
                return r;
        }

        r = amdgpu_bo_kmap(bo, (void **)&msg);
        if (r) {
                amdgpu_bo_unreserve(bo);
                amdgpu_bo_unref(&bo);
                return r;
        }

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        amdgpu_bo_kunmap(bo);
        amdgpu_bo_unreserve(bo);

        return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}

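/**
 * amdgpu_vcn_dec_ring_test_ib - IB test for the decode ring
 *
 * @ring: the decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a create and a destroy message and wait for the fence of the
 * destroy to signal.
 */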
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }

        dma_fence_put(fence);

error:
        return r;
}

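/**
 * amdgpu_vcn_enc_ring_test_ring - read pointer test for the encode ring
 *
 * @ring: the encode ring to test
 *
 * Commit an END command and poll until the read pointer advances.
 */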
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr = amdgpu_ring_get_rptr(ring);
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r) {
                DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed\n",
                          ring->idx);
                r = -ETIMEDOUT;
        }

        return r;
}

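/**
 * amdgpu_vcn_enc_get_create_msg - generate an encoder create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to wait on for msg processing
 *
 * Open up a session for HW test.
 */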
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

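/**
 * amdgpu_vcn_enc_get_destroy_msg - generate an encoder destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @fence: optional fence to wait on for msg processing
 *
 * Close up a session for HW test.
 */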
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

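/**
 * amdgpu_vcn_enc_ring_test_ib - IB test for the encode ring
 *
 * @ring: the encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit a create and a destroy session message and wait for the fence of
 * the destroy to signal.
 */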
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }
error:
        dma_fence_put(fence);
        return r;
}