drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO        "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2         "amdgpu/raven2_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

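/**
 * amdgpu_vcn_sw_init - initialize VCN software state
 * @adev: amdgpu_device pointer
 *
 * Picks the firmware image for the detected ASIC, requests and validates
 * it, caches the version from the common header, and allocates the VCPU
 * buffer object in VRAM (sized to also hold the ucode image when it is
 * not loaded through the PSP).
 */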
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->rev_id >= 8)
                        fw_name = FIRMWARE_RAVEN2;
                else if (adev->pdev->device == 0x15d8)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

        /* Bits 20-23 hold the encode major version and are non-zero under the
         * new naming convention. Under the old naming convention this field
         * is part of the version minor and the DRM_DISABLED_FLAG. Since the
         * latest old-style version minor is 0x5B and DRM_DISABLED_FLAG is
         * zero, this field has always been zero for old-style firmware, so
         * these four bits tell us which naming convention is in use.
         */
        fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
        if (fw_check) {
                unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

                fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
                enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
                enc_major = fw_check;
                dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
                vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
                DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
                        enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {
                unsigned int version_major, version_minor, family_id;

                family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
                version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
                version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
                DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                        version_major, version_minor, family_id);
        }

        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        return 0;
}

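/**
 * amdgpu_vcn_sw_fini - tear down VCN software state
 * @adev: amdgpu_device pointer
 *
 * Frees the saved-BO backup, the VCPU buffer object, the decode, encode
 * and JPEG rings, and releases the firmware.
 */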
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i;

        kvfree(adev->vcn.saved_bo);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

        amdgpu_ring_fini(&adev->vcn.ring_jpeg);

        release_firmware(adev->vcn.fw);

        return 0;
}

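/**
 * amdgpu_vcn_suspend - save VCPU buffer object before suspend
 * @adev: amdgpu_device pointer
 *
 * Cancels the idle work and copies the VCPU BO contents out of VRAM into
 * a kvmalloc'ed system-memory backup so they can be restored on resume.
 */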
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

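/**
 * amdgpu_vcn_resume - restore VCPU buffer object after resume
 * @adev: amdgpu_device pointer
 *
 * Copies the saved backup back into the VCPU BO if one exists; otherwise
 * re-copies the ucode image from the firmware file (unless the PSP loads
 * it) and clears the remainder of the BO.
 */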
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kvfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                        memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                                    le32_to_cpu(hdr->ucode_size_bytes));
                        size -= le32_to_cpu(hdr->ucode_size_bytes);
                        ptr += le32_to_cpu(hdr->ucode_size_bytes);
                }
                memset_io(ptr, 0, size);
        }

        return 0;
}

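/*
 * Delayed-work handler that power-gates VCN once all rings go idle.
 * With DPG support it also (un)pauses the fw-based and JPEG engines
 * according to which rings still have fences outstanding; if no fences
 * remain at all the block is gated, otherwise the work is rescheduled.
 */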
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0;
        unsigned int i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;

                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

                adev->vcn.pause_dpg_mode(adev, &new_state);
        }

        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

        if (fences == 0) {
                amdgpu_gfx_off_ctrl(adev, true);
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, false);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_GATE);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

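/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 * @ring: the ring about to be used
 *
 * Cancels the pending idle work; if it was not already running, ungates
 * the block first. With DPG support the pause state is recomputed from
 * the current fence counts, always keeping the engine that owns @ring
 * paused.
 */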
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks) {
                amdgpu_gfx_off_ctrl(adev, false);
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, true);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_UNGATE);
        }

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;
                unsigned int fences = 0;
                unsigned int i;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);

                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

                /* the engine that owns the ring being used stays paused */
                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;

                adev->vcn.pause_dpg_mode(adev, &new_state);
        }
}

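/**
 * amdgpu_vcn_ring_end_use - schedule idle check after ring use
 * @ring: the ring that was used
 *
 * Re-arms the idle work so the block is power-gated again once it has
 * been idle for VCN_IDLE_TIMEOUT.
 */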
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

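/**
 * amdgpu_vcn_dec_ring_test_ring - smoke-test the decode ring
 * @ring: the decode ring
 *
 * Seeds a scratch register with 0xCAFEDEAD, submits a write of 0xDEADBEEF
 * to it through the ring, and polls until the new value shows up or the
 * timeout expires.
 */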
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

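/*
 * Submit a decoder message BO to the ring via a small direct IB that
 * programs the GPCOM VCPU registers with the message address, padded out
 * with no-ops. Consumes the (reserved) BO reference on both the success
 * and the error path.
 */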
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

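/* Build a decoder create-session message for @handle in a new VRAM BO and send it. */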
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

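/* Build a decoder destroy-session message for @handle and send it. */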
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

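/**
 * amdgpu_vcn_dec_ring_test_ib - exercise the decode ring with real IBs
 * @ring: the decode ring
 * @timeout: wait timeout in jiffies
 *
 * Sends a create-session message followed by a destroy-session message
 * and waits on the fence of the latter.
 */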
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

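/**
 * amdgpu_vcn_enc_ring_test_ring - smoke-test an encode ring
 * @ring: the encode ring
 *
 * Submits a single END command and polls for the read pointer to move.
 */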
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                DRM_UDELAY(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

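/*
 * Build an encoder open-session IB (session info, task info and an
 * initialize op) and submit it directly, returning the fence if asked.
 */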
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

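/*
 * Build an encoder close-session IB for @handle, mirroring the create
 * message but ending with the close-session op.
 */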
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

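/**
 * amdgpu_vcn_enc_ring_test_ib - exercise an encode ring with real IBs
 * @ring: the encode ring
 * @timeout: wait timeout in jiffies
 *
 * Opens and closes an encode session and waits on the fence of the
 * close message.
 */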
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        return r;
}

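/**
 * amdgpu_vcn_jpeg_ring_test_ring - smoke-test the JPEG ring
 * @ring: the JPEG ring
 *
 * Same scratch-register handshake as the decode ring test, but using
 * PACKETJ encoding.
 */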
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

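/*
 * Write 0xDEADBEEF to the scratch register through a direct JPEG IB,
 * padding the rest of the IB with type-6 no-op packets.
 */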
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        const unsigned ib_size_dw = 16;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
        ib->ptr[1] = 0xDEADBEEF;
        for (i = 2; i < 16; i += 2) {
                ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

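/**
 * amdgpu_vcn_jpeg_ring_test_ib - exercise the JPEG ring with a real IB
 * @ring: the JPEG ring
 * @timeout: wait timeout in jiffies
 *
 * Submits the scratch-register write as an IB, waits on its fence and
 * then polls the register for the expected value.
 */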
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        struct dma_fence *fence = NULL;
        long r = 0;

        r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto error;
        } else if (r < 0) {
                goto error;
        } else {
                r = 0;
        }

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error:
        /* dma_fence_put() tolerates NULL, so the fence is released on all paths */
        dma_fence_put(fence);
        return r;
}