/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
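
/*
 * One-time software init: pick and load the VCN firmware for the detected
 * ASIC, log its version, and allocate the VRAM buffer object that backs the
 * VCPU (firmware image plus stack and context area).
 */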
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}
	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
	/* Bit 20-23, it is encode major and non-zero for new naming convention.
	 * This field is part of version minor and DRM_DISABLED_FLAG in old naming
	 * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
	 * is zero in old naming convention, this field is always zero so far.
	 * These four bits are used to tell which naming convention is present.
	 */
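	/* Hypothetical worked example (not a real firmware): a ucode_version
	 * word of 0x12345678 has fw_check = 0x3, so the new convention applies
	 * and it decodes as VEP 1, DEC 2, ENC 3.69 (0x45), revision 1656 (0x678).
	 */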
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			version_major, version_minor, family_id);
	}
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}
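
/*
 * Tear down in reverse order of init: drop any suspend snapshot, free the
 * VCPU buffer object, finalize the decode, encode and JPEG rings, and
 * release the firmware.
 */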
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}
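
/*
 * On suspend the VCPU buffer contents in VRAM are lost, so snapshot them
 * into system memory after making sure the idle worker is no longer running.
 */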
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
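
/*
 * Restore the VCPU buffer: either copy back the suspend snapshot, or, when
 * none exists (e.g. on first use), re-upload the firmware image and clear
 * the rest of the buffer.
 */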
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}
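
/*
 * Delayed-work handler that counts the fences still outstanding on the VCN
 * rings: it refreshes the dynamic power-gating pause state, then either
 * powers the block down when everything is idle or re-arms itself.
 */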
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
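
/*
 * Called before commands are submitted to a VCN ring: cancel the idle
 * worker, power the block back up if the worker had already run, and
 * refresh the DPG pause state for the ring being used.
 */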
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}
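
/*
 * Called after submission; power-down is deferred by re-arming the idle
 * worker rather than gating the block immediately.
 */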
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
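
/*
 * Sanity check for the decode ring: write a magic value to a scratch
 * register through the ring and poll until the write lands.
 */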
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
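
/*
 * Submit a decoder message: point the VCPU at the message buffer via the
 * GPCOM registers, pad the IB with no-ops, submit it directly to the ring,
 * and hand the resulting fence back to the caller. The buffer is expected
 * to be reserved; it is fenced, unreserved and unreferenced here on both
 * the success and error paths.
 */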
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
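
/*
 * Build a decoder "create session" message for the given handle. The
 * trailing words appear to describe a 0x780 x 0x440 (1920x1088) surface;
 * the remainder of the page is zero-filled.
 */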
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}
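
/*
 * Decode IB test: send a create message followed by a destroy message for
 * the same session handle and wait for the destroy to fence.
 */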
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}
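
/*
 * Sanity check for an encode ring: submit an END command and poll the read
 * pointer until the engine consumes it.
 */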
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
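
/*
 * Build a minimal encoder "create session" IB: a session-info block
 * pointing at a dummy address past the IB, a task-info block, and an
 * initialize op, padded with zeroes to ib_size_dw.
 */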
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}
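
/*
 * Sanity check for the JPEG ring: same scratch-register handshake as the
 * decode ring test, but using the PACKETJ encoding.
 */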
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
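
/*
 * JPEG IB test helper: an IB that writes 0xDEADBEEF to the scratch register,
 * submitted directly; the caller polls for the value after the fence signals.
 */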
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
		struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}