/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN          "amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO        "amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2         "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS       "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR         "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE  "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10         "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14         "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12         "amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER  "amdgpu/navy_flounder_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
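
/*
 * A reading of amdgpu_vcn_sw_init() below: pick the firmware image for
 * the current ASIC, request and validate it, log the firmware version,
 * then allocate one VCPU BO per non-harvested VCN instance (plus a small
 * indirect-SRAM BO when DPG is used together with PSP firmware loading).
 */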

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned char fw_check;
        int i, r;

        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
        mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
        for (i = 0; i < adev->vcn.num_vcn_inst; i++)
                atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

        switch (adev->asic_type) {
        case CHIP_RAVEN:
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        fw_name = FIRMWARE_RAVEN2;
                else if (adev->apu_flags & AMD_APU_IS_PICASSO)
                        fw_name = FIRMWARE_PICASSO;
                else
                        fw_name = FIRMWARE_RAVEN;
                break;
        case CHIP_ARCTURUS:
                fw_name = FIRMWARE_ARCTURUS;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_RENOIR:
                if (adev->apu_flags & AMD_APU_IS_RENOIR)
                        fw_name = FIRMWARE_RENOIR;
                else
                        fw_name = FIRMWARE_GREEN_SARDINE;

                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI10:
                fw_name = FIRMWARE_NAVI10;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI14:
                fw_name = FIRMWARE_NAVI14;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVI12:
                fw_name = FIRMWARE_NAVI12;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_SIENNA_CICHLID:
                fw_name = FIRMWARE_SIENNA_CICHLID;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        case CHIP_NAVY_FLOUNDER:
                fw_name = FIRMWARE_NAVY_FLOUNDER;
                if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
                    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
                        adev->vcn.indirect_sram = true;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vcn.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vcn.fw);
                adev->vcn.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

        /* Bit 20-23, it is encode major and non-zero for new naming convention.
         * This field is part of version minor and DRM_DISABLED_FLAG in old naming
         * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
         * is zero in old naming convention, this field is always zero so far.
         * These four bits are used to tell which naming convention is present.
         */
        fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
        if (fw_check) {
                unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

                fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
                enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
                enc_major = fw_check;
                dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
                vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
                DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
                         enc_major, enc_minor, dec_ver, vep, fw_rev);
        } else {
                unsigned int version_major, version_minor, family_id;

                family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
                version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
                version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
                DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
                         version_major, version_minor, family_id);
        }
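
        /*
         * Worked example for the decode above, using a hypothetical
         * ucode_version word of 0x0210900A (not taken from a real image):
         * fw_rev = 0x00A, enc_minor = 0x09, enc_major = 1 (the non-zero
         * fw_check that selects the new convention), dec_ver = 2, vep = 0,
         * so the log line reads "ENC: 1.9 DEC: 2 VEP: 0 Revision: 10".
         */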

        bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
                bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
        bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
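
        /*
         * Resulting per-instance VCPU BO layout, as implied by the sums
         * above and the fw_shared offsets below (low to high):
         *   - firmware image + 8 bytes, page aligned, only when the image
         *     is loaded directly instead of through PSP
         *   - AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE working area
         *   - struct amdgpu_fw_shared, page aligned, at the very end
         */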

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;

                r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
                                            &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                        return r;
                }

                adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
                adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
                                bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

                if (adev->vcn.indirect_sram) {
                        r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
                                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
                                        &adev->vcn.inst[i].dpg_sram_gpu_addr,
                                        &adev->vcn.inst[i].dpg_sram_cpu_addr);
                        if (r) {
                                dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
                                return r;
                        }
                }
        }

        return 0;
}
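
/*
 * Note on the DPG indirect-SRAM BO allocated above: 64 * 2 * 4 bytes is
 * read here as room for 64 dword (register offset, value) pairs that the
 * DPG path stages instead of issuing direct register writes. That is an
 * interpretation of the size arithmetic, not something this code states.
 */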

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
        int i, j;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                if (adev->vcn.indirect_sram) {
                        amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
                                              &adev->vcn.inst[j].dpg_sram_gpu_addr,
                                              (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
                }
                kvfree(adev->vcn.inst[j].saved_bo);

                amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
                                      &adev->vcn.inst[j].gpu_addr,
                                      (void **)&adev->vcn.inst[j].cpu_addr);

                amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

                for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                        amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
        }

        release_firmware(adev->vcn.fw);
        mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);

        return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return 0;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
                if (!adev->vcn.inst[i].saved_bo)
                        return -ENOMEM;

                /* the VCPU BO lives in VRAM, copy through the io accessor */
                memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
        }
        return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
        unsigned size;
        void *ptr;
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
                if (adev->vcn.inst[i].vcpu_bo == NULL)
                        return -EINVAL;

                size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
                ptr = adev->vcn.inst[i].cpu_addr;

                if (adev->vcn.inst[i].saved_bo != NULL) {
                        memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
                        kvfree(adev->vcn.inst[i].saved_bo);
                        adev->vcn.inst[i].saved_bo = NULL;
                } else {
                        const struct common_firmware_header *hdr;
                        unsigned offset;

                        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
                        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
                                memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
                                            le32_to_cpu(hdr->ucode_size_bytes));
                                size -= le32_to_cpu(hdr->ucode_size_bytes);
                                ptr += le32_to_cpu(hdr->ucode_size_bytes);
                        }
                        memset_io(ptr, 0, size);
                }
        }
        return 0;
}
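
/*
 * Idle handling: the delayed work below counts fences still outstanding
 * on every instance's rings. When nothing is pending and no submission
 * is in flight, VCN is power gated; otherwise the work re-arms itself.
 * With DPG, the firmware-based pause state is also refreshed per instance.
 */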

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
        unsigned int i, j;

        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;

                for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
                        fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
                }

                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                        struct dpg_pause_state new_state;

                        if (fence[j] ||
                            unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

                        adev->vcn.pause_dpg_mode(adev, j, &new_state);
                }

                fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
                fences += fence[j];
        }

        if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                        AMD_PG_STATE_GATE);
        } else {
                schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
}
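
/*
 * begin_use/end_use bracket each job submission: begin_use cancels the
 * idle work and ungates VCN (updating the DPG pause state as needed),
 * while end_use drops the submission counts and re-arms the idle timer.
 */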

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        atomic_inc(&adev->vcn.total_submission_cnt);
        cancel_delayed_work_sync(&adev->vcn.idle_work);

        mutex_lock(&adev->vcn.vcn_pg_lock);
        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
                AMD_PG_STATE_UNGATE);

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                struct dpg_pause_state new_state;

                if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
                        atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
                        new_state.fw_based = VCN_DPG_STATE__PAUSE;
                } else {
                        unsigned int fences = 0;
                        unsigned int i;

                        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                                fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

                        if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
                                new_state.fw_based = VCN_DPG_STATE__PAUSE;
                        else
                                new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
                }

                adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
        }
        mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
        if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
            ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
                atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

        atomic_dec(&ring->adev->vcn.total_submission_cnt);

        schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
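
/*
 * Decoder ring smoke test: seed scratch9 with 0xCAFEDEAD over MMIO, ask
 * the ring to overwrite it with 0xDEADBEEF, then poll until the write
 * lands or adev->usec_timeout iterations have passed.
 */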

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        /* VCN in SRIOV does not support direct register read/write */
        if (amdgpu_sriov_vf(adev))
                return 0;

        WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;
        amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }
        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}
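
/*
 * amdgpu_vcn_dec_send_msg() wraps a message BO in a 16-dword direct-submit
 * IB of data0/data1/cmd register writes pointing the decoder firmware at
 * the message, padding the tail with nops; it consumes the caller's BO
 * reservation and reference on both the success and error paths.
 */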

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
                                   struct amdgpu_bo *bo,
                                   struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *f = NULL;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(adev, 64,
                                     AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);
        ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
        ib->ptr[1] = addr;
        ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
        ib->ptr[3] = addr >> 32;
        ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
        ib->ptr[5] = 0;
        for (i = 6; i < 16; i += 2) {
                ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
                ib->ptr[i+1] = 0;
        }
        ib->length_dw = 16;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err_free;

        amdgpu_bo_fence(bo, f, false);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err_free:
        amdgpu_job_free(job);
err:
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}
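
/*
 * The create message below is a minimal decoder session descriptor. The
 * 0x00000780 and 0x00000440 words are 1920 and 1088 in decimal, so this
 * is read here as describing a 1920x1088 session; the remaining magic
 * words are the fixed framing the firmware expects.
 */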

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000038);
        msg[2] = cpu_to_le32(0x00000001);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000001);
        msg[7] = cpu_to_le32(0x00000028);
        msg[8] = cpu_to_le32(0x00000010);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x00000007);
        msg[11] = cpu_to_le32(0x00000000);
        msg[12] = cpu_to_le32(0x00000780);
        msg[13] = cpu_to_le32(0x00000440);
        for (i = 14; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct dma_fence **fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_bo *bo = NULL;
        uint32_t *msg;
        int r, i;

        r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, (void **)&msg);
        if (r)
                return r;

        msg[0] = cpu_to_le32(0x00000028);
        msg[1] = cpu_to_le32(0x00000018);
        msg[2] = cpu_to_le32(0x00000000);
        msg[3] = cpu_to_le32(0x00000002);
        msg[4] = cpu_to_le32(handle);
        msg[5] = cpu_to_le32(0x00000000);
        for (i = 6; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence;
        long r;

        r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

        dma_fence_put(fence);
error:
        return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r;

        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }
        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}
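
/*
 * The encoder create/destroy helpers below hand-build small session-info
 * and task-info IBs; only the handle and the BO address vary per call,
 * the other dwords are fixed message framing for the encoder firmware.
 */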

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                         struct amdgpu_bo *bo,
                                         struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                     AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}
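
/*
 * The destroy variant below mirrors the create helper; the only visible
 * difference in the message body is the final opcode, 0x08000002 (close
 * session) instead of 0x08000001 (initialize).
 */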

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                          struct amdgpu_bo *bo,
                                          struct dma_fence **fence)
{
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                     AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        ib = &job->ibs[0];
        addr = amdgpu_bo_gpu_offset(bo);

        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;

        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
        ib->ptr[ib->length_dw++] = 0x0000001c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008;
        ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);

        return 0;

err:
        amdgpu_job_free(job);
        return r;
}
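
/*
 * The encoder IB test drives the two helpers above back to back against a
 * scratch 128 KiB BO and then waits on the destroy fence, mapping a
 * dma_fence_wait_timeout() result of 0 to -ETIMEDOUT and any positive
 * remainder to success.
 */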

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *bo = NULL;
        long r;

        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
                return r;

        r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;

        r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);

        return r;
}