/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

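/**
 * amdgpu_vcn_sw_init - initialize the VCN software state
 *
 * @adev: amdgpu_device pointer
 *
 * Request and validate the VCN firmware, then allocate the VRAM
 * buffer object that backs the VCPU (firmware image, stack, heap
 * and session data).
 */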
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned version_major, version_minor, family_id;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
        family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
        version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
        version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
        DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                version_major, version_minor, family_id);

        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
                  +  AMDGPU_VCN_SESSION_SIZE * 40;
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        return 0;
}

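/**
 * amdgpu_vcn_sw_fini - tear down the VCN software state
 *
 * @adev: amdgpu_device pointer
 *
 * Free the saved VCPU image, the VCPU buffer object, the rings and
 * the firmware.
 */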
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i;

        kfree(adev->vcn.saved_bo);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

        amdgpu_ring_fini(&adev->vcn.ring_jpeg);

        release_firmware(adev->vcn.fw);

        return 0;
}

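/**
 * amdgpu_vcn_suspend - save the VCPU buffer object
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel any pending idle work and copy the contents of the VCPU
 * buffer object to system memory so it survives suspend.
 */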
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

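/**
 * amdgpu_vcn_resume - restore the VCPU buffer object
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the saved VCPU image if one exists; otherwise reload the
 * firmware image and clear the rest of the buffer object.
 */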
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                            le32_to_cpu(hdr->ucode_size_bytes));
                size -= le32_to_cpu(hdr->ucode_size_bytes);
                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                memset_io(ptr, 0, size);
        }

        return 0;
}

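/**
 * amdgpu_vcn_idle_work_handler - power off VCN when idle
 *
 * @work: delayed work item
 *
 * Power gate the VCN block (or drop its DPM reference) once no fences
 * are outstanding on the decode and encode rings; otherwise re-arm
 * the delayed work.
 */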
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
        unsigned i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
        }

        if (fences == 0) {
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, false);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_GATE);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

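/**
 * amdgpu_vcn_ring_begin_use - power on VCN before ring use
 *
 * @ring: amdgpu_ring pointer
 *
 * Cancel the pending idle work; if none was pending, the block may
 * already be powered down, so power it back up.
 */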
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks) {
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, true);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_UNGATE);
        }
}

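/**
 * amdgpu_vcn_ring_end_use - schedule VCN power off
 *
 * @ring: amdgpu_ring pointer
 *
 * Re-arm the idle work so the block is powered down again after
 * VCN_IDLE_TIMEOUT without activity.
 */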
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

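/**
 * amdgpu_vcn_dec_ring_test_ring - register write/read test
 *
 * @ring: the decode ring to test
 *
 * Write a known value to mmUVD_CONTEXT_ID through the ring and poll
 * the register until the value appears or the timeout expires.
 */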
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                          ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

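/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message to the ring
 *
 * @ring: the decode ring
 * @bo: buffer object containing the message
 * @fence: optional fence to return for the submission
 *
 * Build a small IB that hands the message buffer address to the VCPU
 * and issues the command, then fence the buffer and release it.
 */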
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err_free;

        amdgpu_job_free(job);

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

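/**
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder create message
 *
 * @ring: the decode ring
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Fill a message buffer with a "create session" command and submit it.
 */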
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

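/**
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder destroy message
 *
 * @ring: the decode ring
 * @handle: session handle to destroy
 * @fence: optional fence to return
 *
 * Fill a message buffer with a "destroy session" command and submit it.
 */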
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

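/**
 * amdgpu_vcn_dec_ring_test_ib - test an IB on the decode ring
 *
 * @ring: the decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create and a destroy message and wait for the fence of the
 * destroy message to signal.
 */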
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }

        dma_fence_put(fence);

error:
        return r;
}

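/**
 * amdgpu_vcn_enc_ring_test_ring - test the encode ring
 *
 * @ring: the encode ring to test
 *
 * Submit an END command and poll the read pointer until it moves.
 */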
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr = amdgpu_ring_get_rptr(ring);
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r) {
                DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                          ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed\n",
                          ring->idx);
                r = -ETIMEDOUT;
        }

        return r;
}

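/**
 * amdgpu_vcn_enc_get_create_msg - build and send an encoder create message
 *
 * @ring: the encode ring
 * @handle: session handle to use
 * @fence: optional fence to return
 *
 * Open an encode session by submitting a session info, task info and
 * "op initialize" command stream.
 */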
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

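/**
 * amdgpu_vcn_enc_get_destroy_msg - build and send an encoder destroy message
 *
 * @ring: the encode ring
 * @handle: session handle to close
 * @fence: optional fence to return
 *
 * Close an encode session by submitting a session info, task info and
 * "op close session" command stream.
 */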
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

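/**
 * amdgpu_vcn_enc_ring_test_ib - test an IB on the encode ring
 *
 * @ring: the encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Send a create and a destroy message and wait for the fence of the
 * destroy message to signal.
 */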
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r) {
                DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
                goto error;
        }

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
        } else {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        }
error:
        dma_fence_put(fence);
        return r;
}

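/**
 * amdgpu_vcn_jpeg_ring_test_ring - register write/read test on the JPEG ring
 *
 * @ring: the JPEG ring to test
 *
 * Write a known value to mmUVD_CONTEXT_ID through the ring and poll
 * the register until the value appears or the timeout expires.
 */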
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                          ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }

        return r;
}

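/**
 * amdgpu_vcn_jpeg_set_reg - write a register through a JPEG IB
 *
 * @ring: the JPEG ring
 * @handle: session handle (unused)
 * @fence: fence to return for the submission
 *
 * Submit an IB that writes a test pattern to mmUVD_JPEG_PITCH,
 * padded with type-6 NOP packets.
 */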
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
                struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        const unsigned ib_size_dw = 16;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
        ib->ptr[1] = 0xDEADBEEF;
        for (i = 2; i < 16; i += 2) {
                ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
        job->fence = dma_fence_get(f);
        if (r)
                goto err;

        amdgpu_job_free(job);
        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

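/**
 * amdgpu_vcn_jpeg_ring_test_ib - test an IB on the JPEG ring
 *
 * @ring: the JPEG ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submit an IB that writes a test pattern to mmUVD_JPEG_PITCH, wait
 * for its fence, then poll the register for the pattern.
 */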
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        struct dma_fence *fence = NULL;
        long r = 0;

        r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
        if (r) {
                DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
                goto error;
        }

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto error;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto error;
        } else
                r = 0;

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout)
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
        else {
                DRM_ERROR("ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }

error:
        dma_fence_put(fence);
        return r;
}