/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI	"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI	"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
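
/*
 * CIK parts load the firmware files shipped under radeon/ (these chips
 * were originally supported by the radeon driver), while VI parts use
 * the amdgpu/ directory.
 */
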
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}
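
	/*
	 * ucode_version packs three fields: bits 31:20 major, 19:8 minor,
	 * 7:0 binary ID. The driver repacks them into the top three bytes
	 * of fw_version so later checks can compare plain integers.
	 */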
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	/* allocate firmware, stack and heap BO */

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, &adev->vce.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
	if (r) {
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
		return r;
	}

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
	r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	cancel_delayed_work_sync(&adev->vce.idle_work);
	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}
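
	/*
	 * The ucode payload starts ucode_array_offset_bytes into the blob;
	 * copy it with memcpy_toio() since the kmap of a VRAM BO is an
	 * I/O mapping.
	 */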
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_GATE);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}
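
/*
 * Runtime power handling: begin_use cancels the pending idle work and, if
 * it was the first user, ungates VCE clocks/power; end_use re-arms the one
 * second idle timer, after which the handler above gates VCE again once no
 * fences remain outstanding.
 */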

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_CG_STATE_UNGATE);
			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						     AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;
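
	/* the tail of the IB BO doubles as a dummy feedback buffer below */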

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
	job->fence = dma_fence_get(f);
	if (r)
		goto err;

	amdgpu_job_free(job);
	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit directly to the ring or go through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct) {
		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
		job->fence = dma_fence_get(f);
		if (r)
			goto err;

		amdgpu_job_free(job);
	} else {
		r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
		if (r)
			goto err;
	}

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
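
	/*
	 * addr now points at the indexed slot inside the user's buffer
	 * array; look up the VM mapping that backs it.
	 */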
	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return -EINVAL;
	}

	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 *
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	r = amdgpu_cs_sysvm_access_required(p);
	if (r)
		return r;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 *
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
			     unsigned vm_id, bool ctx_switch)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address the sequence number is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);
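
	/* a working engine advances its rptr past the END command we queued */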
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}