/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"
#define FIRMWARE_PICASSO	"amdgpu/picasso_vcn.bin"
#define FIRMWARE_RAVEN2		"amdgpu/raven2_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->rev_id >= 8)
			fw_name = FIRMWARE_RAVEN2;
		else if (adev->pdev->device == 0x15d8)
			fw_name = FIRMWARE_PICASSO;
		else
			fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old convention this field is part of version
	 * minor and DRM_DISABLED_FLAG; since the latest version minor is 0x5B
	 * and DRM_DISABLED_FLAG is zero there, the field is always zero so far
	 * for old-style firmware, so these four bits tell the two naming
	 * conventions apart.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
			 version_major, version_minor, family_id);
	}

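	/* Worked example with a made-up value (not from any released
	 * firmware): ucode_version = 0x21A3405B gives
	 * fw_check = (0x21A3405B >> 20) & 0xf = 0xA, i.e. the new convention,
	 * and decodes as ENC 10.52 (0xA.0x34), DEC 1, VEP 2, Revision 91
	 * (0x5B).
	 */
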
	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
	/* Only reserve room for the firmware image itself when the VCPU,
	 * rather than the PSP, loads it from this BO.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	/* VRAM contents are lost across suspend, so snapshot the VCPU BO */
	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

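/* Note on the resume path above: after a suspend, the snapshot taken by
 * amdgpu_vcn_suspend() is copied back verbatim. On a first resume with no
 * snapshot, the BO is rebuilt instead: the firmware image is re-copied from
 * adev->vcn.fw (unless the PSP loads it) and the stack/context area is
 * zeroed, since VRAM contents cannot be trusted at that point.
 */
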
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

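/* Power-management handshake around ring use: the idle worker above only
 * gates VCN once no VCN ring has had fences outstanding for a full
 * VCN_IDLE_TIMEOUT period. A typical submission path pairs the two helpers
 * below (a sketch, not a real call site):
 *
 *	amdgpu_vcn_ring_begin_use(ring);   // cancel idle work, power up
 *	// ... emit commands and fences ...
 *	amdgpu_vcn_ring_end_use(ring);     // re-arm the idle timer
 */
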
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0;
		unsigned int i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
		}
		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		/* the ring being brought up must not be left unpaused */
		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, &new_state);
	}
}

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* write a token through the ring and poll the scratch register */
	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	/* pass the message buffer address to the VCPU and kick the command */
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

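/* The dword layout of the create message above is defined by the VCN decode
 * firmware interface and has no public header. By analogy with the older UVD
 * create message (an assumption, not a documented fact), msg[4] carries the
 * session handle and msg[12]/msg[13] = 0x780/0x440 describe a 1920x1088
 * stream; this dummy message is enough for the IB test to open a session
 * that the destroy message below then tears down.
 */
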
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

	dma_fence_put(fence);
error:
	return r;
}

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	/* a lone END command suffices: the ring works if rptr advances */
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

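/* The encode IB above appears to follow the block layout VCN enc inherited
 * from VCE: each block is a size-in-bytes dword, a type dword, then payload
 * (0x18/0x00000001 session info, 0x14/0x00000002 task info, 0x08/0x080000xx
 * operation). The "dummy" address points 1024 bytes past the IB itself and
 * simply gives the session-info block a valid buffer to reference.
 */
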
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

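/* In the IB built above, PACKETJ_TYPE0 performs a register write on the
 * JPEG ring, and the PACKETJ_TYPE6 entries appear to be no-op padding for
 * the rest of the IB. The companion test below polls mmUVD_SCRATCH9 for
 * the 0xDEADBEEF token that this register write stores.
 */
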
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		goto error;
	} else {
		r = 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	dma_fence_put(fence);
error:
	return r;
}