/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

MODULE_FIRMWARE(FIRMWARE_VEGA10);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware/stack/heap BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		fw_name = FIRMWARE_BONAIRE;
		fw_name = FIRMWARE_KAVERI;
		fw_name = FIRMWARE_KABINI;
		fw_name = FIRMWARE_HAWAII;
		fw_name = FIRMWARE_MULLINS;
#endif
		fw_name = FIRMWARE_TONGA;
		fw_name = FIRMWARE_CARRIZO;
		fw_name = FIRMWARE_FIJI;
		fw_name = FIRMWARE_STONEY;
		fw_name = FIRMWARE_POLARIS10;
		fw_name = FIRMWARE_POLARIS11;
		fw_name = FIRMWARE_VEGA10;
		fw_name = FIRMWARE_POLARIS12;

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",

	r = amdgpu_ucode_validate(adev->vce.fw);
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
		release_firmware(adev->vce.fw);

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
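
	/*
	 * Cache the packed firmware version (major/minor/binary id) so later
	 * code, e.g. the message size check in amdgpu_vce_get_create_msg(),
	 * can simply compare against fw_version.
	 */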
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |

	/* allocate firmware, stack and heap BO */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
		DRM_ERROR("Failed setting up VCE run queue.\n");

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))

	if (i == AMDGPU_MAX_VCE_HANDLES)

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
	const struct common_firmware_header *hdr;

	if (adev->vce.vcpu_bo == NULL)

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
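
	/*
	 * The VRAM copy of the firmware may have been lost across suspend,
	 * so re-upload the ucode image (everything after the header) into
	 * the pinned VCPU BO before the VCE block is started again.
	 */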
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it is not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	if (amdgpu_sriov_vf(adev))

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
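
	/*
	 * If no fences are outstanding on any VCE ring the block is idle:
	 * with DPM enabled the power management code turns VCE off, otherwise
	 * the clocks are set to 0 and the block is clock- and power-gated.
	 * If work is still pending the idle work is simply re-armed.
	 */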
	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,

	schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;

	if (amdgpu_sriov_vf(adev))

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
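
	/*
	 * set_clocks is true when no idle work was pending, i.e. VCE may
	 * already have been powered down and has to be brought back up
	 * (via DPM, or by restoring clocks and ungating) before use.
	 */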
	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, true);
		amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
		amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_CG_STATE_UNGATE);
		amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
					     AMD_PG_STATE_UNGATE);

	mutex_unlock(&adev->vce.idle_mutex);

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
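
/*
 * Note: begin_use/end_use bracket every VCE submission; begin_use cancels the
 * pending idle work and powers the block up, while end_use re-arms the one
 * second VCE_IDLE_TIMEOUT after which the idle handler powers VCE down again.
 */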
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
	struct amdgpu_ring *ring = &adev->vce.ring[0];
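
	/*
	 * Walk the handle table and submit a destroy message for every
	 * session still owned by this file descriptor, so sessions leaked
	 * by a crashed or misbehaving process are cleaned up in firmware.
	 */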
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
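
	/* use an address inside the IB BO (just past the message) as a dummy feedback buffer */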
	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;
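
	/* firmware with major version 52 or newer expects a longer create message (four extra dwords) */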
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);

	amdgpu_job_free(job);

	*fence = dma_fence_get(f);

	amdgpu_job_free(job);
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);

	/* stitch together a VCE destroy msg */
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback not needed, 0xffffffff tells the firmware not to write any */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);

	amdgpu_job_free(job);
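
	/*
	 * Without direct submission the job is handed to the GPU scheduler
	 * through the VCE entity instead, as used when cleaning up handles
	 * of a closed file descriptor.
	 */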
	r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);

	*fence = dma_fence_get(f);

	amdgpu_job_free(job);
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;

	if (index == 0xffffffff)

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
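
	/*
	 * Find the BO backing this virtual address, verify the mapping is
	 * large enough for the access, then rewrite the lo/hi dwords in the
	 * IB with the buffer's real GPU offset.
	 */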
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");

	/* handle not found, try to alloc a new one */
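	/*
	 * atomic_cmpxchg() claims a free slot (old value 0) so concurrent
	 * submissions cannot grab the same slot; the bit set in *allocated
	 * lets the parser roll the allocation back if the IB is rejected.
	 */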
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;

	DRM_ERROR("No more free VCE handles!\n");

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;

	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	r = amdgpu_cs_sysvm_access_required(p);
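
	/*
	 * Walk the IB one command at a time; every command starts with a
	 * length dword followed by a command dword, and must refer to a
	 * session announced by a preceding session command.
	 */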
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
			if (session_idx < 0) {
			size = &p->adev->vce.img_size[session_idx];

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");

	/* No error, free all destroyed handle slots */
	/* Error during parsing, free all allocated handle slots */
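	/*
	 * On success only the slots of sessions destroyed by this IB are
	 * released; after a parse error every slot allocated while parsing
	 * is released, so a rejected IB does not leak session handles.
	 */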
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		atomic_set(&p->adev->vce.handles[i], 0);

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;
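
	/*
	 * In VM mode the buffer addresses in the IB are already GPU virtual
	 * addresses, so only session handling (create/destroy) is validated
	 * here; no relocation of buffer addresses is needed.
	 */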
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
			if (session_idx < 0) {

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");

	/* No error, free all destroyed handle slots */
	amdgpu_ib_free(p->adev, ib, NULL);

	/* Error during parsing, free all allocated handle slots */

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		atomic_set(&p->adev->vce.handles[i], 0);
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
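
	/* VCE_CMD_FENCE writes the sequence number to the given address; VCE_CMD_TRAP then raises the fence interrupt */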
	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	int r, timeout = adev->usec_timeout;

	/* workaround for the VCE ring test being slow under SR-IOV */
	if (amdgpu_sriov_vf(adev))

	r = amdgpu_ring_alloc(ring, 16);
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);
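
	/* poll the read pointer; once the engine has consumed the END command it moves past the value sampled above */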
	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)

		DRM_INFO("ring test on %d succeeded in %d usecs\n",

		DRM_ERROR("amdgpu: ring %d test failed\n",
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	struct dma_fence *fence = NULL;
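
	/*
	 * The IB test creates and immediately destroys a dummy session
	 * (handle 1) and then waits on the destroy fence; if the fence
	 * signals within the timeout the firmware is processing IBs.
	 */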
	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);

	r = dma_fence_wait_timeout(fence, false, timeout);
		DRM_ERROR("amdgpu: IB test timed out.\n");
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);

	dma_fence_put(fence);