 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
/* 1 second timeout */
#define VCE_IDLE_TIMEOUT_MS	1000
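/*
 * Both the idle work handler and amdgpu_vce_note_usage() below re-arm the
 * idle work with this timeout, so VCE is only powered down once a full
 * second has passed without new work being submitted on either ring.
 */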
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI		"radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI		"radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"radeon/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
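/*
 * Note that the CIK parts (Bonaire, Kabini, Kaveri, Hawaii, Mullins) reuse
 * the VCE images shipped in the legacy radeon/ firmware directory, while
 * the newer parts load theirs from the amdgpu/ directory.
 */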
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
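/*
 * The idle work handler is only forward declared here because
 * amdgpu_vce_sw_init() needs its address before the definition further
 * down in this file.
 */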
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware, stack and heap BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
		fw_name = FIRMWARE_BONAIRE;
		fw_name = FIRMWARE_KAVERI;
		fw_name = FIRMWARE_KABINI;
		fw_name = FIRMWARE_HAWAII;
		fw_name = FIRMWARE_MULLINS;
		fw_name = FIRMWARE_TONGA;
		fw_name = FIRMWARE_CARRIZO;
		fw_name = FIRMWARE_FIJI;

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",

	r = amdgpu_ucode_validate(adev->vce.fw);
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
		release_firmware(adev->vce.fw);
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
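	/*
	 * The 32-bit ucode_version word packs the firmware version with the
	 * major number in bits 31:20, the minor number in bits 19:8 and a
	 * binary id in bits 7:0, matching the shifts and masks below.  For
	 * example a value of 0x03200a02 decodes to version 50.10, binary id 2.
	 */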
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
	/* allocate firmware, stack and heap BO */
	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->vce.gpu_addr);
	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		amdgpu_bo_unref(&adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)

	amdgpu_bo_unref(&adev->vce.vcpu_bo);

	amdgpu_ring_fini(&adev->vce.ring[0]);
	amdgpu_ring_fini(&adev->vce.ring[1]);

	release_firmware(adev->vce.fw);
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
int amdgpu_vce_suspend(struct amdgpu_device *adev)
	if (adev->vce.vcpu_bo == NULL)

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))

	if (i == AMDGPU_MAX_VCE_HANDLES)

	/* TODO: suspending running encoding sessions isn't supported */
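	/*
	 * Reaching this point means at least one session handle is still in
	 * use; since suspending active encode sessions is not supported (see
	 * the TODO above), the function presumably bails out with an error
	 * here instead of unpinning the firmware BO.
	 */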
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
int amdgpu_vce_resume(struct amdgpu_device *adev)
	const struct common_firmware_header *hdr;

	if (adev->vce.vcpu_bo == NULL)

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(cpu_addr, (adev->vce.fw->data) + offset,
	       (adev->vce.fw->size) - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);
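	/*
	 * Resume deliberately re-copies the firmware image (everything after
	 * the common header) into the pinned VCPU BO: the BO lives in VRAM,
	 * whose contents may not survive a suspend/resume cycle, so the
	 * upload has to be repeated here.
	 */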
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used any more
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);

	if ((amdgpu_fence_count_emitted(&adev->vce.ring[0]) == 0) &&
	    (amdgpu_fence_count_emitted(&adev->vce.ring[1]) == 0)) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		schedule_delayed_work(&adev->vce.idle_work,
				      msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
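	/*
	 * In short: while either VCE ring still has fences outstanding the
	 * work simply re-arms itself, and once both rings have gone idle VCE
	 * is powered down, either through DPM or, when DPM is unavailable,
	 * by forcing the VCE clocks to 0.
	 */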
 * amdgpu_vce_note_usage - power up VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Make sure VCE is powered up when we want to use it
static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
					    msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
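	/*
	 * cancel_delayed_work_sync() returns true if the idle work was still
	 * pending, so set_clocks starts out true only when the idle work had
	 * already run and VCE may have been powered down in the meantime.
	 * Re-queueing the work restarts the idle timeout, and the &= keeps
	 * set_clocks false if someone else managed to re-arm it first.
	 */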
	if (adev->pm.dpm_enabled) {
		/* XXX figure out if the streams changed */
		streams_changed = false;

	if (set_clocks || streams_changed) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
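			/*
			 * Without DPM the VCE clocks are programmed directly;
			 * the two values appear to be EVCLK and ECCLK in
			 * 10 kHz units, i.e. roughly 533 MHz and 400 MHz.
			 */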
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
	struct amdgpu_ring *ring = &adev->vce.ring[0];

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);
		if (!handle || adev->vce.filp[i] != filp)

		amdgpu_vce_note_usage(adev);

		r = amdgpu_vce_get_destroy_msg(ring, handle, NULL);
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
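		/*
		 * Sending a destroy message on ring 0 gives the firmware a
		 * chance to tear down the session before the handle slot is
		 * cleared and becomes available for reuse.
		 */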
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
	const unsigned ib_size_dw = 1024;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);

	dummy = ib.gpu_addr + 1024;
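	/*
	 * The IB allocated above is ib_size_dw * 4 = 4096 bytes long; "dummy"
	 * points 1 KiB into that same allocation and is passed to the
	 * firmware as the feedback buffer address below, so the test message
	 * does not need a separate buffer object.
	 */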
	/* stitch together a VCE create msg */
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	for (i = ib.length_dw; i < ib_size_dw; ++i)

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);

		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @adev: amdgpu_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
	const unsigned ib_size_dw = 1024;

	r = amdgpu_ib_get(ring, NULL, ib_size_dw * 4, &ib);
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);

	dummy = ib.gpu_addr + 1024;
	/* stitch together a VCE destroy msg */
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
		DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r);

		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 *
 * Patch relocation inside command stream with real buffer address
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	struct amdgpu_bo *bo;

	if (index == 0xffffffff)

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
	if ((addr + (uint64_t)size) >
	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	ib->ptr[lo] = addr & 0xFFFFFFFF;
	ib->ptr[hi] = addr >> 32;
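	/*
	 * Net effect: the address the user supplied is replaced by the BO's
	 * real GPU offset plus the offset the address had within the
	 * mapping, while the size * index term that was added for the lookup
	 * and bounds check above is subtracted out again.
	 */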
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, bool *allocated)
	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");

	/* handle not found, try to allocate a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;

	DRM_ERROR("No more free VCE handles!\n");
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
	struct amdgpu_ib *ib = &p->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	bool destroyed = false;
	bool created = false;
	bool allocated = false;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	int i, r = 0, idx = 0;
	amdgpu_vce_note_usage(p->adev);

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);

			DRM_ERROR("No other command allowed after destroy!\n");

		case 0x00000001: // session
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
			size = &p->adev->vce.img_size[session_idx];

		case 0x00000002: // task info
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);

		case 0x01000001: // create
				DRM_ERROR("Handle already in use!\n");

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *

		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
		case 0x04000009: // vui
		case 0x05000002: // auxiliary buffer

		case 0x03000001: // encode
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,

		case 0x02000001: // destroy

		case 0x05000001: // context buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000004: // video bitstream buffer
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

		case 0x05000005: // feedback buffer
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,

			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");

	if (allocated && !created) {
		DRM_ERROR("New session without create command!\n");

	if ((!r && destroyed) || (r && allocated)) {
		/*
		 * IB contains a destroy msg or we have allocated a handle and
		 * got an error; either way, free the handle again.
		 */
		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
 * amdgpu_vce_ring_emit_semaphore - emit a semaphore command
 *
 * @ring: engine to use
 * @semaphore: address of semaphore
 * @emit_wait: true=emit wait, false=emit signal
bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
				    struct amdgpu_semaphore *semaphore,
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, VCE_CMD_SEMAPHORE);
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);
	amdgpu_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));

	amdgpu_ring_write(ring, VCE_CMD_END);
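	/*
	 * The semaphore address is 8-byte aligned, so bits 3..22 and 23..42
	 * of it are packed into the two 20-bit fields written above, and bit
	 * 0 of the final command word selects wait (1) versus signal (0).
	 */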
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @ib: the IB to execute
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
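	/*
	 * Only a 32-bit sequence value is emitted here, hence the WARN_ON
	 * for the 64-bit fence flag above; VCE_CMD_TRAP presumably raises
	 * the interrupt through which the signalled fence is noticed.
	 */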
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);

	r = amdgpu_ring_lock(ring, 16);
		DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_unlock_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
		DRM_ERROR("amdgpu: ring %d test failed\n",
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
	struct amdgpu_fence *fence = NULL;
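	/*
	 * The IB test opens a dummy encoder session (handle 1), immediately
	 * destroys it again and then waits on the fence of the destroy
	 * message, which exercises the whole IB submission path end to end.
	 */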
	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);

	r = amdgpu_vce_get_destroy_msg(ring, 1, &fence);
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);

	r = amdgpu_fence_wait(fence, false);
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);

	amdgpu_fence_unref(&fence);