/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

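/*
 * MODULE_FIRMWARE() records each firmware file in the module metadata so
 * that userspace tooling (e.g. initramfs generators) knows which blobs to
 * bundle alongside the driver.
 */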
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

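	/*
	 * fw_version packs major, minor and binary id into the top three
	 * bytes, so firmware generations can be compared numerically (see
	 * the "fw_version >> 24 >= 52" checks when building VCE messages).
	 */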
	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the entity used for handle management in the kernel driver.
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

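	/*
	 * drm_dev_enter() protects the copy below against a concurrent
	 * device unplug; if the device is already gone the MMIO write is
	 * skipped entirely.
	 */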
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

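	/*
	 * Only gate power and clocks when no fences are outstanding on any
	 * VCE ring; otherwise re-arm the idle work and check again later.
	 */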
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
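	/*
	 * If cancel_delayed_work_sync() returned false the idle work was not
	 * pending, meaning it may already have run and powered VCE down, so
	 * clocks and power have to be brought back up here.
	 */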
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

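	/*
	 * Firmware major version 52 extended the create command by four
	 * dwords, hence the larger length field on newer firmware.
	 */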
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
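	/*
	 * Derive the allowed placement window from the slot offset
	 * (index * size) so that the addressed range cannot cross the 4GB
	 * boundary the engine can address.
	 */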
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
					 min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

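	/*
	 * Rebase the command stream address: strip the virtual mapping
	 * offset, add the real GPU offset of the BO, then remove the slot
	 * offset again so lo/hi point at the start of the buffer.
	 */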
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
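	/*
	 * atomic_cmpxchg() swaps in the new handle only if the slot still
	 * reads zero, so a free slot is claimed without holding a lock even
	 * under concurrent submissions.
	 */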
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

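	/*
	 * First pass: validate every buffer referenced by the stream and
	 * constrain its placement below the 4GB boundary; relocation
	 * patching happens in a second pass once all buffers are placed.
	 */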
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

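	/*
	 * Second pass: track session state and patch every buffer address
	 * in the stream with the real GPU offsets.
	 */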
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;
			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

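	/*
	 * In VM mode addresses are already virtual, so only the session
	 * bookkeeping is done here; no buffer validation or relocation
	 * patching is needed.
	 */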
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;
			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

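	/*
	 * The read pointer only advances once the engine has consumed the
	 * END command, so polling rptr tells us whether the ring is alive.
	 */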
	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}