/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
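	/* Repack major/minor/binary id into the driver-side encoding:
	 * major in bits 31:24, minor in 23:16, binary id in 15:8.
	 * The create-message path below keys off (fw_version >> 24).
	 */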
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
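
	/* Only power VCE down when no fences are outstanding on any ring. */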
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
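	/* If no idle work was pending the block may already be power
	 * gated, so the clocks must be brought back up before first use.
	 */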
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;
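
	/* firmware with major version >= 52 expects a longer create
	 * command (0x40 bytes instead of 0x30) with four extra dwords
	 */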
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback not needed, set to 0xffffffff so the firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;
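
	/* Destroy messages are either pushed directly to the ring (HW
	 * tests and teardown) or submitted through the scheduler entity
	 * like a regular job, depending on "direct".
	 */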
	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
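
	/* Restrict the allowed placement window so the addressed range
	 * cannot cross the 4GB boundary described in the kernel-doc above.
	 */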
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to allocate a new one */
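	/* atomic_cmpxchg succeeds only on a slot that still reads 0, so
	 * two parsers racing here can never claim the same session slot.
	 */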
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
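
	/* The IB is processed in two passes: the first validates all
	 * buffer objects and their placement, the second checks the
	 * session state and patches the relocations.
	 */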
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 * @vmid: unused by VCE
 * @ctx_switch: unused by VCE
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vmid, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: fence sequence number to write
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);
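
	/* Once the engine consumes the command the read pointer moves
	 * past the snapshot taken above; polling for that is enough to
	 * prove the ring is alive.
	 */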
	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}