/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID		"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER		"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

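/* One-time VCN software init: bring the locks, counters and the delayed
 * idle worker to a known state, pick and load the per-ASIC firmware, parse
 * the firmware version, and allocate the per-instance VCPU buffer objects
 * (plus a DPG scratch BO when indirect SRAM mode is in use).
 */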
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case CHIP_ARCTURUS:
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI10:
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI14:
		fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVI12:
		fw_name = FIRMWARE_NAVI12;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_SIENNA_CICHLID:
		fw_name = FIRMWARE_SIENNA_CICHLID;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_NAVY_FLOUNDER:
		fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case CHIP_VANGOGH:
		fw_name = FIRMWARE_VANGOGH;
		break;
	case CHIP_DIMGREY_CAVEFISH:
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the
	 * new naming convention. In the old convention this field is part of
	 * the version minor and DRM_DISABLED_FLAG. Since the latest version
	 * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
	 * this field has always been zero so far, so these four bits tell
	 * which naming convention is in use.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		/* the fw_shared block lives at the tail of the VCPU BO */
		adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
				bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
						    &adev->vcn.inst[i].dpg_sram_gpu_addr,
						    &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

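/* Teardown counterpart of amdgpu_vcn_sw_init(): free the per-instance DPG
 * and VCPU buffer objects, any suspend snapshot, the rings, the firmware
 * image and the mutexes.
 */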
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

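/* Snapshot each VCPU BO into system memory so its contents survive VRAM
 * loss over suspend; the idle worker is cancelled first so it cannot race
 * with the copy.
 */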
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}

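/* Restore the VCPU BO on resume: prefer the snapshot taken by
 * amdgpu_vcn_suspend(); without one, reload the ucode section from the
 * firmware file (non-PSP load) and zero the remainder of the buffer.
 */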
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

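/* Delayed-work handler: once all dec/enc rings are fence-idle and nothing
 * is being submitted, re-enable GFXOFF and power-gate VCN; otherwise
 * re-arm the worker. In DPG mode, each instance is also paused/unpaused
 * based on outstanding encode work.
 */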
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_gfx_off_ctrl(adev, true);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

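/* Runs at the start of every submission: bump the submission count,
 * disable GFXOFF if the idle worker was not pending, ungate VCN and, in
 * DPG mode, choose the pause state from the ring type and outstanding
 * encode fences. Serialized by vcn_pg_lock.
 */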
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work))
		amdgpu_gfx_off_ctrl(adev, false);

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

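/* Runs when a submission completes: drop the counters taken in
 * amdgpu_vcn_ring_begin_use() and re-arm the idle worker to gate VCN
 * after VCN_IDLE_TIMEOUT of inactivity.
 */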
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

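/* Decode ring smoke test: write a magic value to the scratch9 register
 * through the ring and poll until it reads back, or time out.
 */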
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

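/* Submit a decoder message BO through a direct IB: data0/data1 carry the
 * BO address, cmd kicks processing, and the rest is padded with NOPs. The
 * BO is fenced and unreferenced on both the success and error paths.
 */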
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

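/* Write the canonical 'create session' decoder message for @handle into a
 * freshly reserved VRAM BO and hand it to amdgpu_vcn_dec_send_msg().
 */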
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

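/* Same as above, but with the 'destroy session' decoder message. */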
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

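/* Decode IB test: push a create/destroy message pair through the ring and
 * wait on the fence of the destroy message.
 */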
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);

error:
	return r;
}

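/* Encode ring smoke test: submit a single END command and wait for the
 * read pointer to move past it.
 */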
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

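/* Emit a minimal encoder 'create session' IB (session info, task info,
 * initialize op) referencing @bo, submitted directly to the ring.
 */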
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_bo *bo,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

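/* Mirror image of the create message: identical layout, but the final op
 * is 'close session' (0x08000002).
 */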
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_bo *bo,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

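/* Encode IB test: reserve a scratch BO, run the create/destroy session
 * messages against it and wait on the final fence.
 */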
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	return r;
}