/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS	"amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR		"amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE	"amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10		"amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14		"amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12		"amdgpu/navi12_vcn.bin"
#define FIRMWARE_SIENNA_CICHLID	"amdgpu/sienna_cichlid_vcn.bin"
#define FIRMWARE_NAVY_FLOUNDER	"amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH	"amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH	"amdgpu/dimgrey_cavefish_vcn.bin"
#define FIRMWARE_ALDEBARAN	"amdgpu/aldebaran_vcn.bin"
#define FIRMWARE_BEIGE_GOBY	"amdgpu/beige_goby_vcn.bin"
#define FIRMWARE_YELLOW_CARP	"amdgpu/yellow_carp_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
MODULE_FIRMWARE(FIRMWARE_VANGOGH);
MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

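/**
 * amdgpu_vcn_sw_init - VCN software init
 *
 * @adev: amdgpu_device pointer
 *
 * Select and load the VCN firmware for the detected IP version, then
 * allocate the per-instance VCPU buffer objects that back the firmware
 * image, stack, context and firmware shared memory.
 */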
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	switch (adev->ip_versions[UVD_HWIP][0]) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	case IP_VERSION(2, 5, 0):
		fw_name = FIRMWARE_ARCTURUS;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 2, 0):
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			fw_name = FIRMWARE_RENOIR;
		else
			fw_name = FIRMWARE_GREEN_SARDINE;

		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 6, 0):
		fw_name = FIRMWARE_ALDEBARAN;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 0):
		fw_name = FIRMWARE_NAVI10;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(2, 0, 2):
		if (adev->asic_type == CHIP_NAVI12)
			fw_name = FIRMWARE_NAVI12;
		else
			fw_name = FIRMWARE_NAVI14;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 0):
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
			fw_name = FIRMWARE_SIENNA_CICHLID;
		else
			fw_name = FIRMWARE_NAVY_FLOUNDER;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 2):
		fw_name = FIRMWARE_VANGOGH;
		break;
	case IP_VERSION(3, 0, 16):
		fw_name = FIRMWARE_DIMGREY_CAVEFISH;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 0, 33):
		fw_name = FIRMWARE_BEIGE_GOBY;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	case IP_VERSION(3, 1, 1):
		fw_name = FIRMWARE_YELLOW_CARP;
		if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
		    (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
			adev->vcn.indirect_sram = true;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part of
	 * the version minor and DRM_DISABLED_FLAG. Since the latest version minor
	 * is 0x5B and DRM_DISABLED_FLAG is zero in the old convention, this field
	 * has always been zero so far. These four bits therefore tell which
	 * naming convention is in use.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	bo_size += AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

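	/* Per-instance VCPU BO layout: [firmware image (non-PSP loads only)]
	 * [stack][context][fw_shared]. fw_shared sits at the very end of the
	 * BO, so its offset below is bo_size minus its own aligned size.
	 */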
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
					    &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
			return r;
		}

		adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr +
			bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
		adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr +
			bo_size - AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));

		if (adev->vcn.indirect_sram) {
			r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r) {
				dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
				return r;
			}
		}
	}

	return 0;
}

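/**
 * amdgpu_vcn_sw_fini - VCN software fini
 *
 * @adev: amdgpu_device pointer
 *
 * Free the buffer objects, rings and firmware allocated by
 * amdgpu_vcn_sw_init() and tear down the locks.
 */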
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}

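/* Check the IP discovery data to see whether a given ring type is fused
 * off (harvested) on a specific VCN instance; the per-block disable bits
 * are carried in the revision field.
 */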
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
{
	bool ret = false;
	int major;
	int minor;
	int revision;

	/* if the IP data cannot be found, this VCN instance does not exist */
	if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0)
		return true;

	if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) {
		ret = true;
	} else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) {
		ret = true;
	}

	return ret;
}

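/**
 * amdgpu_vcn_suspend - VCN suspend
 *
 * @adev: amdgpu_device pointer
 *
 * Cancel the idle work and save the VCPU BO contents of every active
 * instance to system memory so they survive the loss of VRAM.
 */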
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		if (drm_dev_enter(adev_to_drm(adev), &idx)) {
			memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
			drm_dev_exit(idx);
		}
	}
	return 0;
}

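/**
 * amdgpu_vcn_resume - VCN resume
 *
 * @adev: amdgpu_device pointer
 *
 * Restore the VCPU BO contents saved at suspend time; if nothing was
 * saved, reload the firmware image (for non-PSP loads) and clear the
 * rest of the BO.
 */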
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	int i, idx;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			if (drm_dev_enter(adev_to_drm(adev), &idx)) {
				memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
				drm_dev_exit(idx);
			}
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				if (drm_dev_enter(adev_to_drm(adev), &idx)) {
					memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
						    le32_to_cpu(hdr->ucode_size_bytes));
					drm_dev_exit(idx);
				}
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}

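/* Idle handler: once no fences are outstanding on any instance and no
 * submissions are in flight, gate VCN power and drop the video power
 * profile; otherwise re-arm the delayed work. For DPG, the firmware
 * pause state is also updated based on pending encode work.
 */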
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;
	int r = 0;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
		       AMD_PG_STATE_GATE);
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

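/* Called at the start of a submission: cancel any pending idle work,
 * switch to the video power profile, ungate the block, and for DPG
 * decide whether the firmware needs to enter pause mode based on
 * outstanding encode work.
 */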
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r = 0;

	atomic_inc(&adev->vcn.total_submission_cnt);

	if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) {
		r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
				true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
	}

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
	       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0;
			unsigned int i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
		ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

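/* Ring test: write a magic value to the scratch register through the
 * decode ring and poll until the firmware echoes it back.
 */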
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* VCN in SRIOV does not support direct register read/write */
	if (amdgpu_sriov_vf(adev))
		return 0;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

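/* Submit a decoder message: the message GPU address is written through
 * the data0/data1 register pair followed by a command trigger, padded
 * with NOP register writes to a fixed 16-dword IB.
 */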
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib_msg,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

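/* Build a minimal "create session" decoder message in a page-aligned
 * IB; the trailing 0x780/0x440 words appear to encode a 1920x1088
 * picture size.
 */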
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
		struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
		struct amdgpu_ib *ib)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *msg;
	int r, i;

	memset(ib, 0, sizeof(*ib));
	r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			AMDGPU_IB_POOL_DIRECT,
			ib);
	if (r)
		return r;

	msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return 0;
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

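/* Software-ring variant of amdgpu_vcn_dec_send_msg(): instead of
 * register writes, the message address is passed in an
 * amdgpu_vcn_decode_buffer structure embedded in the IB.
 */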
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib_msg,
				      struct dma_fence **fence)
{
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	const unsigned int ib_size_dw = 64;
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
				AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	ib->length_dw = 0;

	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
	ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
	memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));

	decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
	decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
	decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_ib_free(adev, ib_msg, f);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_ib_free(adev, ib_msg, f);
	return r;
}

int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
	if (r)
		goto error;
	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

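/* Encoder IB test helper: open an encode session by chaining a
 * "session info" block, a "task info" block and an initialize op
 * into a 16-dword IB.
 */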
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct amdgpu_ib *ib_msg,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct amdgpu_ib *ib_msg,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
					AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *fence = NULL;
	struct amdgpu_ib ib;
	long r;

	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
			AMDGPU_IB_POOL_DIRECT,
			&ib);
	if (r)
		return r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	amdgpu_ib_free(adev, &ib, fence);
	dma_fence_put(fence);

	return r;
}

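/* Map an encode ring index to a scheduler priority level; ring 0 is the
 * default queue and higher indices map to higher priorities.
 */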
enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

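/* Register the VCN firmware image(s) with the PSP ucode loader so that
 * PSP places the ucode, instead of the driver copying it into the VCPU
 * BO itself.
 */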
void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
{
	int i;
	unsigned int idx;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;

		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			/* currently only support 2 FW instances */
			if (i >= 2) {
				dev_info(adev->dev, "More than 2 VCN FW instances!\n");
				break;
			}
			idx = AMDGPU_UCODE_ID_VCN + i;
			adev->firmware.ucode[idx].ucode_id = idx;
			adev->firmware.ucode[idx].fw = adev->vcn.fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		}
		dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
	}
}