/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO        "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2         "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS       "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR         "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE  "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10         "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14         "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12         "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER  "amdgpu/navy_flounder_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

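/*
 * amdgpu_vcn_sw_init - VCN software initialization
 *
 * Picks the VCN firmware image for the current ASIC, loads and validates
 * it, logs the firmware version, and allocates the per-instance VCPU,
 * firmware-shared and (for DPG-capable parts) indirect SRAM buffer objects.
 */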
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int i, r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
        mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
        for (i = 0; i < adev->vcn.num_vcn_inst; i++)
                atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        fw_name = FIRMWARE_RAVEN2;
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
        case CHIP_ARCTURUS:
                fw_name = FIRMWARE_ARCTURUS;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_RENOIR:
                if (adev->apu_flags & AMD_APU_IS_RENOIR)
                        fw_name = FIRMWARE_RENOIR;
                else
                        fw_name = FIRMWARE_GREEN_SARDINE;

                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI10:
                fw_name = FIRMWARE_NAVI10;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI14:
                fw_name = FIRMWARE_NAVI14;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI12:
                fw_name = FIRMWARE_NAVI12;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_SIENNA_CICHLID:
                fw_name = FIRMWARE_SIENNA_CICHLID;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVY_FLOUNDER:
                fw_name = FIRMWARE_NAVY_FLOUNDER;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

        /* Bits 20-23 hold the encode major version and are non-zero in the
         * new naming convention. In the old naming convention this field was
         * part of the version minor and DRM_DISABLED_FLAG. Since the latest
         * version minor is 0x5B and DRM_DISABLED_FLAG is zero in the old
         * convention, these four bits have always been zero so far and can
         * be used to tell which naming convention is in use.
         */
        fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
        if (fw_check) {
                unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

                fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
                enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
                enc_major = fw_check;
                dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
                vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
                DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
                        enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {
                unsigned int version_major, version_minor, family_id;

                family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
                version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
                version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
                DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
                        version_major, version_minor, family_id);
        }

        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;

                r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                                AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
                                                &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                        return r;
                }

                adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
                adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

                if (adev->vcn.indirect_sram) {
                        r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
                                        &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
                        if (r) {
                                dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
                                return r;
                        }
                }
        }

        return 0;
}

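/*
 * amdgpu_vcn_sw_fini - VCN software teardown
 *
 * Frees the buffer objects and rings allocated in amdgpu_vcn_sw_init,
 * releases the firmware and destroys the locks.
 */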
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i, j;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                                  &adev->vcn.inst[j].dpg_sram_gpu_addr,
                                                  (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
                }
                kvfree(adev->vcn.inst[j].saved_bo);

                amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
                                          &adev->vcn.inst[j].gpu_addr,
                                          (void **)&adev->vcn.inst[j].cpu_addr);

                amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

                for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
        }

        release_firmware(adev->vcn.fw);
        mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);

        return 0;
}

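/*
 * amdgpu_vcn_suspend - save VCN state before suspend
 *
 * Cancels the idle work and copies each instance's VCPU buffer object
 * into a kernel allocation so it can be restored on resume.
 */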
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return 0;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
                if (!adev->vcn.inst[i].saved_bo)
                        return -ENOMEM;

                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
        }
        return 0;
}

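/*
 * amdgpu_vcn_resume - restore VCN state after resume
 *
 * Copies the saved image back into each instance's VCPU buffer object;
 * if nothing was saved, reloads the firmware region and zeroes the rest.
 */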
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return -EINVAL;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                if (adev->vcn.inst[i].saved_bo != NULL) {
                        memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
                        kvfree(adev->vcn.inst[i].saved_bo);
                        adev->vcn.inst[i].saved_bo = NULL;
                } else {
                        const struct common_firmware_header *hdr;
                        unsigned offset;

                        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                                memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
                                            le32_to_cpu(hdr->ucode_size_bytes));
                                size -= le32_to_cpu(hdr->ucode_size_bytes);
                                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                        }
                        memset_io(ptr, 0, size);
                }
        }
        return 0;
}

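/*
 * Idle work handler: counts the fences still emitted on each instance's
 * rings, updates the DPG pause state accordingly, and power-gates the
 * block once everything is idle; otherwise the work is rescheduled.
 */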
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
        unsigned int i, j;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
                }

                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                        struct dpg_pause_state new_state;

                        if (fence[j] ||
                                unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                        adev->vcn.pause_dpg_mode(adev, j, &new_state);
                }

                fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
                fences += fence[j];
        }

        if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                       AMD_PG_STATE_GATE);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}

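/*
 * amdgpu_vcn_ring_begin_use - called before submitting to a VCN ring
 *
 * Bumps the submission count, cancels any pending idle work, ungates the
 * block and, in DPG mode, selects the pause state to match the
 * outstanding encode work.
 */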
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        atomic_inc(&adev->vcn.total_submission_cnt);
        cancel_delayed_work_sync(&adev->vcn.idle_work);

        mutex_lock(&adev->vcn.vcn_pg_lock);
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
               AMD_PG_STATE_UNGATE);

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;

                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
                        atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                } else {
                        unsigned int fences = 0;
                        unsigned int i;

                        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                                fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

                        if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
                }

                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
        mutex_unlock(&adev->vcn.vcn_pg_lock);
}

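/*
 * amdgpu_vcn_ring_end_use - called after submitting to a VCN ring
 *
 * Drops the submission counts and schedules the idle work to power the
 * block down again once it goes idle.
 */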
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
                ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
                atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

        atomic_dec(&ring->adev->vcn.total_submission_cnt);

        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

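/*
 * Decode ring test: seed a scratch register with 0xCAFEDEAD, submit a
 * packet that overwrites it with 0xDEADBEEF, and poll until the new
 * value is visible.
 */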
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        /* VCN in SRIOV does not support direct register read/write */
        if (amdgpu_sriov_vf(adev))
                return 0;

        WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
        amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

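/*
 * Submits a decoder message buffer to the ring via a directly-submitted
 * IB and optionally returns the job's fence. The reserved BO reference
 * is consumed on both the success and the error paths.
 */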
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);

err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}

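/* Builds a minimal decoder "create session" message and submits it. */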
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                              struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

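/* Builds a decoder "destroy session" message and submits it. */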
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

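/*
 * Decode IB test: submits a create/destroy message pair and waits for
 * the destroy fence to signal.
 */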
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

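/*
 * Encode ring test: submits an END command and polls until the ring's
 * read pointer moves past it.
 */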
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

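/* Builds a minimal encoder session-create IB and submits it directly. */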
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

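/* Builds the matching encoder session-close IB and submits it directly. */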
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct amdgpu_bo *bo,
                                          struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                        AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

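/*
 * Encode IB test: opens and closes an encoder session around a scratch
 * buffer object and waits for the final fence.
 */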
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo = NULL;
        long r;

        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
                return r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}