/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}
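
/*
 * Usage sketch (not from this file, names illustrative): the hardware-specific
 * code is expected to size the VCPU BO for firmware plus per-session
 * stack/data areas, roughly along the lines of what vce_v3_0.c does:
 *
 *	r = amdgpu_vce_sw_init(adev, VCE_V3_0_FW_SIZE +
 *			       (VCE_V3_0_STACK_SIZE + VCE_V3_0_DATA_SIZE) * 2);
 *
 * where the VCE_V3_0_* size macros are assumptions about the ASIC-specific
 * header, not definitions made here.
 */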

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the entity used to submit VCE jobs to the GPU scheduler.
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}
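
/*
 * Note on the begin_use/end_use pairing: begin_use only ungates clocks and
 * power when cancel_delayed_work_sync() returns false, i.e. when no idle work
 * was pending; if idle work was still queued, the block is known to be powered
 * already and cancelling the work is all that is needed.
 */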

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
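
/*
 * VCE messages, as built above and below, are sequences of
 * { length-in-bytes, command, payload... } dword packets; e.g. the session
 * packet is 0x0000000c, 0x00000001, <handle>. This layout is inferred from
 * the IB contents this file emits, not from a published packet spec.
 */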

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
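
/*
 * Within this file, direct submission is used for the ring/IB tests, while
 * the delayed (scheduler entity) path is used when cleaning up handles left
 * behind by userspace, cf. amdgpu_vce_free_handles() passing direct=false.
 */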

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO cross a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
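
/*
 * Worked example for the clamping above (illustrative numbers, 4K pages):
 * size = 4096 and index = 2 gives offset = 8192, so fpfn = 2 and
 * lpfn = 0x100000000 >> PAGE_SHIFT = 0x100000; a negative index instead
 * leaves fpfn = 0 and pulls lpfn down by the page-aligned offset, keeping
 * addr + offset below the 4GB boundary either way.
 */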

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
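
/*
 * The *allocated bitmask lets the CS parsers below undo handle allocation on
 * error: each newly claimed slot i sets bit (1 << i), and the parser clears
 * those handle atomics again if it bails out before a create command is seen.
 */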

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
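
/*
 * The parser above is deliberately two-pass: the first loop only clamps BO
 * placements so every referenced buffer ends up below the 4GB boundary, and
 * the second loop validates the session state machine and patches the 64bit
 * addresses into the command stream once the final locations are known.
 */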

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
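
/*
 * The fence packet emitted above occupies six dwords on the ring:
 * VCE_CMD_FENCE, addr_lo, addr_hi, seq, VCE_CMD_TRAP, VCE_CMD_END. The
 * description of VCE_CMD_TRAP as the interrupt that signals fence completion
 * is an inference from how the fence code uses it, not from documentation.
 */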

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}