/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
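
/*
 * One-time setup on driver load: fetch and validate the VCN firmware,
 * allocate the VCPU buffer object that holds the firmware image plus
 * stack, heap and session contexts, and create the scheduler entities
 * for the decode and encode rings.
 */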
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	ring = &adev->vcn.ring_dec;
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN dec run queue.\n");
		return r;
	}

	ring = &adev->vcn.ring_enc[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCN enc run queue.\n");
		return r;
	}

	return 0;
}
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kfree(adev->vcn.saved_bo);

	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);

	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	release_firmware(adev->vcn.fw);

	return 0;
}
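
/*
 * Save the VCPU BO to system memory on suspend, since VRAM contents
 * (and with them the firmware state) may be lost while powered down.
 */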
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}
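
/*
 * Restore the VCPU BO on resume: copy back the saved image if one
 * exists, otherwise reload the firmware and zero the remainder.
 */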
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
		memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		size -= le32_to_cpu(hdr->ucode_size_bytes);
		ptr += le32_to_cpu(hdr->ucode_size_bytes);
		memset_io(ptr, 0, size);
	}

	return 0;
}
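
/*
 * Delayed work that gates VCN once no fences are outstanding on the
 * decode and encode rings; re-arms itself while work is still pending.
 */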
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	if (fences == 0) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
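
/*
 * Called when a ring is about to be used: cancel the idle tick and, if
 * none was pending (the block may already be gated), power VCN back up,
 * via DPM when available and via the powergating state otherwise.
 */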
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}
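
/* Re-arm the idle timer once command submission is done. */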
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
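
/*
 * Decode ring smoke test: write a token to UVD_CONTEXT_ID through the
 * ring and poll the register until the token reads back.
 */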
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}

	return r;
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo, bool direct,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err_free;
		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err_free;
	}

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err_free:
	amdgpu_job_free(job);
err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
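
/*
 * Compose a decoder create message for @handle in a fresh BO and send
 * it; the constants follow the VCN firmware message layout.
 */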
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
}
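
/* Compose and send the matching decoder destroy message for @handle. */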
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  bool direct, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
}
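
/* Decode IB test: create and destroy a session, then wait on the fence. */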
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);
error:
	return r;
}
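
/*
 * Encode ring smoke test: submit an END command and wait for the read
 * pointer to move past the commands we wrote.
 */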
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}
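
/*
 * Build an encoder session-create IB for @handle: session info, task
 * info and an "op initialize" command, zero-padded to 16 dwords.
 */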
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
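
/*
 * Build the matching encoder session-destroy IB for @handle, ending in
 * an "op close session" command.
 */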
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
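
/* Encode IB test: open and close a session, then wait on the fence. */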
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}