/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO        "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2         "amdgpu/raven2_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

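/**
 * amdgpu_vcn_sw_init - init the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Pick and load the VCN firmware for this ASIC, log the firmware
 * version and allocate the VCPU buffer object that backs the
 * firmware image, stack and context.
 */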
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->rev_id >= 8)
                        fw_name = FIRMWARE_RAVEN2;
                else if (adev->pdev->device == 0x15d8)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

        /* Bits 20-23 hold the encode major version and are non-zero in the
         * new naming convention. In the old naming convention this field is
         * part of the version minor and DRM_DISABLED_FLAG. Since the latest
         * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
         * naming convention, this field has always been zero there so far.
         * These four bits therefore tell which naming convention is present.
         */
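        /* Example with a hypothetical value: ucode_version 0x21A2B123 has
         * 0xA in bits 20-23, so it decodes under the new convention as
         * VEP: 2 DEC: 1 ENC: 10.43 (0xA.0x2B) Revision: 0x123.
         */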
        fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
        if (fw_check) {
                unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

                fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
                enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
                enc_major = fw_check;
                dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
                vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
                DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
                        enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {
                unsigned int version_major, version_minor, family_id;

                family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
                version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
                version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
                DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                        version_major, version_minor, family_id);
        }

        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
                                    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                return r;
        }

        return 0;
}

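/**
 * amdgpu_vcn_sw_fini - tear down the VCN software state
 * @adev: amdgpu_device pointer
 *
 * Free the saved firmware image, the VCPU buffer object, the decode,
 * encode and JPEG rings, and release the firmware.
 */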
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i;

        kvfree(adev->vcn.saved_bo);

        amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
                              &adev->vcn.gpu_addr,
                              (void **)&adev->vcn.cpu_addr);

        amdgpu_ring_fini(&adev->vcn.ring_dec);

        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

        amdgpu_ring_fini(&adev->vcn.ring_jpeg);

        release_firmware(adev->vcn.fw);

        return 0;
}

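/**
 * amdgpu_vcn_suspend - VCN suspend handling
 * @adev: amdgpu_device pointer
 *
 * Cancel the pending idle work and save the VCPU buffer object
 * contents to system memory so they can be restored on resume.
 */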
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (adev->vcn.vcpu_bo == NULL)
                return 0;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
        if (!adev->vcn.saved_bo)
                return -ENOMEM;

        memcpy_fromio(adev->vcn.saved_bo, ptr, size);

        return 0;
}

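/**
 * amdgpu_vcn_resume - VCN resume handling
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU buffer object from the image saved at suspend
 * time, or reinitialize it from the firmware file and zero the rest
 * when no saved image exists.
 */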
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;

        if (adev->vcn.vcpu_bo == NULL)
                return -EINVAL;

        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;

        if (adev->vcn.saved_bo != NULL) {
                memcpy_toio(ptr, adev->vcn.saved_bo, size);
                kvfree(adev->vcn.saved_bo);
                adev->vcn.saved_bo = NULL;
        } else {
                const struct common_firmware_header *hdr;
                unsigned offset;

                hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                        memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
                                    le32_to_cpu(hdr->ucode_size_bytes));
                        size -= le32_to_cpu(hdr->ucode_size_bytes);
                        ptr += le32_to_cpu(hdr->ucode_size_bytes);
                }
                memset_io(ptr, 0, size);
        }

        return 0;
}

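/**
 * amdgpu_vcn_idle_work_handler - power gate VCN when it goes idle
 * @work: delayed work item
 *
 * Count the fences still outstanding on the VCN rings. With DPG
 * support the pause state is updated first; if no fences remain the
 * block is gated, otherwise the idle work is rescheduled.
 */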
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0;
        unsigned int i;

        for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
        }

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;

                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

                adev->vcn.pause_dpg_mode(adev, &new_state);
        }

        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

        if (fences == 0) {
                amdgpu_gfx_off_ctrl(adev, true);
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, false);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_GATE);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

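/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 * @ring: ring about to be used
 *
 * Cancel the idle work and, if it was not pending, ungate the VCN
 * block; with DPG support, also update the pause state for the ring
 * type that is about to run.
 */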
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (set_clocks) {
                amdgpu_gfx_off_ctrl(adev, false);
                if (adev->pm.dpm_enabled)
                        amdgpu_dpm_enable_uvd(adev, true);
                else
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                                                               AMD_PG_STATE_UNGATE);
        }

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;
                unsigned int fences = 0;
                unsigned int i;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
                }
                if (fences)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else
                        new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;

                adev->vcn.pause_dpg_mode(adev, &new_state);
        }
}

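/**
 * amdgpu_vcn_ring_end_use - schedule VCN power gating
 * @ring: ring that was just used
 *
 * Rearm the idle work so VCN is gated again after VCN_IDLE_TIMEOUT
 * without further activity.
 */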
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

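/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring level test
 * @ring: ring to test
 *
 * Write a known value to UVD_SCRATCH9 through the ring and poll the
 * register until the value reads back.
 */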
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring,
                PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

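/**
 * amdgpu_vcn_dec_send_msg - submit a decoder message buffer
 * @ring: decode ring to submit on
 * @bo: reserved buffer object containing the message
 * @fence: optional fence returned to the caller
 *
 * Build an IB pointing the VCPU at the message buffer and submit it
 * directly; the BO is fenced, unreserved and unreferenced on both the
 * success and the error paths.
 */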
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

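/**
 * amdgpu_vcn_dec_get_create_msg - construct and send a decode create message
 * @ring: decode ring to submit on
 * @handle: session handle to create
 * @fence: optional fence returned to the caller
 */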
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

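/**
 * amdgpu_vcn_dec_get_destroy_msg - construct and send a decode destroy message
 * @ring: decode ring to submit on
 * @handle: session handle to destroy
 * @fence: optional fence returned to the caller
 */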
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

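/**
 * amdgpu_vcn_dec_ring_test_ib - decode indirect buffer test
 * @ring: ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Send a create message followed by a destroy message and wait for
 * the destroy fence to signal.
 */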
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

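/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring level test
 * @ring: ring to test
 *
 * Submit an END command and poll the read pointer until it moves,
 * which shows the ring is being fetched.
 */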
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

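/**
 * amdgpu_vcn_enc_get_create_msg - build and submit an encode session create IB
 * @ring: encode ring to submit on
 * @handle: session handle to create
 * @fence: optional fence returned to the caller
 */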
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

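/**
 * amdgpu_vcn_enc_get_destroy_msg - build and submit an encode session close IB
 * @ring: encode ring to submit on
 * @handle: session handle to close
 * @fence: optional fence returned to the caller
 */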
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t dummy;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        dummy = ib->gpu_addr + 1024;

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
        ib->ptr[ib->length_dw++] = dummy;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

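/**
 * amdgpu_vcn_enc_ring_test_ib - encode indirect buffer test
 * @ring: ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Open and close an encode session and wait for the resulting fence.
 */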
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        return r;
}

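/**
 * amdgpu_vcn_jpeg_ring_test_ring - basic JPEG ring level test
 * @ring: ring to test
 *
 * Write a known value to UVD_SCRATCH9 through the JPEG ring and poll
 * the register until the value reads back.
 */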
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring,
                PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

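/**
 * amdgpu_vcn_jpeg_set_reg - write UVD_SCRATCH9 through a JPEG ring IB
 * @ring: JPEG ring to submit on
 * @handle: session handle (unused here)
 * @fence: optional fence returned to the caller
 */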
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
                struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        const unsigned ib_size_dw = 16;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
        ib->ptr[1] = 0xDEADBEEF;
        for (i = 2; i < 16; i += 2) {
                ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

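/**
 * amdgpu_vcn_jpeg_ring_test_ib - JPEG indirect buffer test
 * @ring: ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Write UVD_SCRATCH9 through an IB, wait for its fence, then poll the
 * register until the value reads back.
 */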
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        struct dma_fence *fence = NULL;
        long r = 0;

        r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto error;
        } else if (r < 0) {
                goto error;
        } else {
                r = 0;
        }

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error:
        dma_fence_put(fence);
        return r;
}