drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE        "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI         "amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI         "amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII         "amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS        "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA          "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO        "amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI           "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY         "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10      "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11      "amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12      "amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM          "amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10         "amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12         "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20         "amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned ucode_version, version_major, version_minor, binary_id;
        int i, r;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
                fw_name = FIRMWARE_BONAIRE;
                break;
        case CHIP_KAVERI:
                fw_name = FIRMWARE_KAVERI;
                break;
        case CHIP_KABINI:
                fw_name = FIRMWARE_KABINI;
                break;
        case CHIP_HAWAII:
                fw_name = FIRMWARE_HAWAII;
                break;
        case CHIP_MULLINS:
                fw_name = FIRMWARE_MULLINS;
                break;
#endif
        case CHIP_TONGA:
                fw_name = FIRMWARE_TONGA;
                break;
        case CHIP_CARRIZO:
                fw_name = FIRMWARE_CARRIZO;
                break;
        case CHIP_FIJI:
                fw_name = FIRMWARE_FIJI;
                break;
        case CHIP_STONEY:
                fw_name = FIRMWARE_STONEY;
                break;
        case CHIP_POLARIS10:
                fw_name = FIRMWARE_POLARIS10;
                break;
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
        case CHIP_POLARIS12:
                fw_name = FIRMWARE_POLARIS12;
                break;
        case CHIP_VEGAM:
                fw_name = FIRMWARE_VEGAM;
                break;
        case CHIP_VEGA10:
                fw_name = FIRMWARE_VEGA10;
                break;
        case CHIP_VEGA12:
                fw_name = FIRMWARE_VEGA12;
                break;
        case CHIP_VEGA20:
                fw_name = FIRMWARE_VEGA20;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vce.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vce.fw);
                adev->vce.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vce.fw->data;

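        /* The header packs major (bits 31:20), minor (bits 19:8) and binary
         * ID (bits 7:0); fw_version repacks them one byte each, so e.g.
         * (fw_version >> 24) yields the major version directly.
         */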
        ucode_version = le32_to_cpu(hdr->ucode_version);
        version_major = (ucode_version >> 20) & 0xfff;
        version_minor = (ucode_version >> 8) & 0xfff;
        binary_id = ucode_version & 0xff;
        DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
                version_major, version_minor, binary_id);
        adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
                                (binary_id << 8));

        r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM |
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->vce.vcpu_bo,
                                    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
                return r;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                atomic_set(&adev->vce.handles[i], 0);
                adev->vce.filp[i] = NULL;
        }

        INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
        mutex_init(&adev->vce.idle_mutex);

        return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
        unsigned i;

        if (adev->vce.vcpu_bo == NULL)
                return 0;

        drm_sched_entity_destroy(&adev->vce.entity);

        amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
                (void **)&adev->vce.cpu_addr);

        for (i = 0; i < adev->vce.num_rings; i++)
                amdgpu_ring_fini(&adev->vce.ring[i]);

        release_firmware(adev->vce.fw);
        mutex_destroy(&adev->vce.idle_mutex);

        return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct drm_gpu_scheduler *sched;
        int r;

        ring = &adev->vce.ring[0];
        sched = &ring->sched;
        r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
                                  &sched, 1, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCE run queue.\n");
                return r;
        }

        return 0;
}
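
/*
 * A minimal sketch of the expected bring-up order, an assumption based on
 * the helpers in this file (in practice the sequence is driven by the VCE
 * IP block callbacks):
 *
 *      amdgpu_vce_sw_init(adev, size);    - request firmware, allocate the BO
 *      amdgpu_vce_entity_init(adev);      - once the VCE rings are set up
 *      amdgpu_vce_resume(adev);           - copy the firmware into the BO
 */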

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
        int i;

        cancel_delayed_work_sync(&adev->vce.idle_work);

        if (adev->vce.vcpu_bo == NULL)
                return 0;

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (atomic_read(&adev->vce.handles[i]))
                        break;

        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;

        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
        void *cpu_addr;
        const struct common_firmware_header *hdr;
        unsigned offset;
        int r, idx;

        if (adev->vce.vcpu_bo == NULL)
                return -EINVAL;

        r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
                return r;
        }

        r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
        if (r) {
                amdgpu_bo_unreserve(adev->vce.vcpu_bo);
                dev_err(adev->dev, "(%d) VCE map failed\n", r);
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vce.fw->data;
        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
                            adev->vce.fw->size - offset);
                drm_dev_exit(idx);
        }

        amdgpu_bo_kunmap(adev->vce.vcpu_bo);

        amdgpu_bo_unreserve(adev->vce.vcpu_bo);

        return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vce.idle_work.work);
        unsigned i, count = 0;

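        /* Count the fences still in flight on all VCE rings; only power the
         * engine down once it is completely idle.
         */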
        for (i = 0; i < adev->vce.num_rings; i++)
                count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

        if (count == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, false);
                } else {
                        amdgpu_asic_set_vce_clocks(adev, 0, 0);
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_PG_STATE_GATE);
                        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_CG_STATE_GATE);
                }
        } else {
                schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
        }
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks;

        if (amdgpu_sriov_vf(adev))
                return;

        mutex_lock(&adev->vce.idle_mutex);
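        /* If the idle work was still pending, VCE is already powered up and
         * cancelling the work is enough; otherwise the idle handler has run
         * (or was never queued) and the clocks must be turned back on.
         */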
        set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, true);
                } else {
                        amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
                        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_PG_STATE_UNGATE);
                }
        }
        mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
        if (!amdgpu_sriov_vf(ring->adev))
                schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
        struct amdgpu_ring *ring = &adev->vce.ring[0];
        int i, r;

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                uint32_t handle = atomic_read(&adev->vce.handles[i]);

                if (!handle || adev->vce.filp[i] != filp)
                        continue;

                r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
                if (r)
                        DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

                adev->vce.filp[i] = NULL;
                atomic_set(&adev->vce.handles[i], 0);
        }
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct amdgpu_ib ib_msg;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
                                     &job);
        if (r)
                return r;

        memset(&ib_msg, 0, sizeof(ib_msg));
        /* only one gpu page is needed, alloc +1 page to make addr aligned. */
        r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
                          AMDGPU_IB_POOL_DIRECT,
                          &ib_msg);
        if (r)
                goto err;

        ib = &job->ibs[0];
        /* let addr point to page boundary */
        addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

        /* stitch together a VCE create msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

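        /* (fw_version >> 24) is the major version packed in
         * amdgpu_vce_sw_init(); major version 52 and newer expect a longer
         * create command with four additional zero dwords.
         */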
        if ((ring->adev->vce.fw_version >> 24) >= 52)
                ib->ptr[ib->length_dw++] = 0x00000040; /* len */
        else
                ib->ptr[ib->length_dw++] = 0x00000030; /* len */
        ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000042;
        ib->ptr[ib->length_dw++] = 0x0000000a;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000080;
        ib->ptr[ib->length_dw++] = 0x00000060;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x0000000c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        if ((ring->adev->vce.fw_version >> 24) >= 52) {
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
        }

        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x00000001;

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        amdgpu_ib_free(ring->adev, &ib_msg, f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     ib_size_dw * 4,
                                     direct ? AMDGPU_IB_POOL_DIRECT :
                                     AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        /* stitch together a VCE destroy msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

        ib->ptr[ib->length_dw++] = 0x00000020; /* len */
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
        ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008; /* len */
        ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

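        /* Direct submission bypasses the scheduler and is used for HW tests
         * and teardown; otherwise the job goes through the VCE entity.
         */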
        if (direct)
                r = amdgpu_job_submit_direct(job, ring, &f);
        else
                f = amdgpu_job_submit(job);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
                                  struct amdgpu_ib *ib, int lo, int hi,
                                  unsigned size, int32_t index)
{
        int64_t offset = ((uint64_t)size) * ((int64_t)index);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo_va_mapping *mapping;
        unsigned i, fpfn, lpfn;
        struct amdgpu_bo *bo;
        uint64_t addr;
        int r;

        addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
               ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
        if (index >= 0) {
                addr += offset;
                fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
                lpfn = 0x100000000ULL >> PAGE_SHIFT;
        } else {
                fpfn = 0;
                lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
        }

        r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
        if (r) {
                DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
                          addr, lo, hi, size, index);
                return r;
        }

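        /* Clamp the allowed placement range (fpfn/lpfn are in pages) so the
         * whole access window stays inside one 4GB segment.
         */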
        for (i = 0; i < bo->placement.num_placement; ++i) {
                bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
                bo->placements[i].lpfn = bo->placements[i].lpfn ?
                        min(bo->placements[i].lpfn, lpfn) : lpfn;
        }
        return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
                               int lo, int hi, unsigned size, uint32_t index)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        uint64_t addr;
        int r;

        if (index == 0xffffffff)
                index = 0;

        addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
               ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
        addr += ((uint64_t)size) * ((uint64_t)index);

        r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
        if (r) {
                DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
                          addr, lo, hi, size, index);
                return r;
        }

        if ((addr + (uint64_t)size) >
            (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
                          addr, lo, hi);
                return -EINVAL;
        }

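        /* Rebase the address from the VM mapping onto the BO's GPU offset,
         * then subtract the index offset again so the patched value keeps
         * the original base + index * size addressing.
         */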
        addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
        addr += amdgpu_bo_gpu_offset(bo);
        addr -= ((uint64_t)size) * ((uint64_t)index);

        amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
        amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

        return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index, or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
                                      uint32_t handle, uint32_t *allocated)
{
        unsigned i;

        /* validate the handle */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (atomic_read(&p->adev->vce.handles[i]) == handle) {
                        if (p->adev->vce.filp[i] != p->filp) {
                                DRM_ERROR("VCE handle collision detected!\n");
                                return -EINVAL;
                        }
                        return i;
                }
        }

        /* handle not found, try to alloc a new one */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
                        p->adev->vce.filp[i] = p->filp;
                        p->adev->vce.img_size[i] = 0;
                        *allocated |= 1 << i;
                        return i;
                }
        }

        DRM_ERROR("No more free VCE handles!\n");
        return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
                             struct amdgpu_job *job,
                             struct amdgpu_ib *ib)
{
        unsigned fb_idx = 0, bs_idx = 0;
        int session_idx = -1;
        uint32_t destroyed = 0;
        uint32_t created = 0;
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
        unsigned idx;
        int i, r = 0;

        job->vm = NULL;
        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

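        /* First pass: only check that every buffer referenced by the
         * commands can be placed without crossing a 4GB boundary.
         */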
        for (idx = 0; idx < ib->length_dw;) {
                uint32_t len = amdgpu_ib_get_value(ib, idx);
                uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
                        r = -EINVAL;
                        goto out;
                }

                switch (cmd) {
                case 0x00000002: /* task info */
                        fb_idx = amdgpu_ib_get_value(ib, idx + 6);
                        bs_idx = amdgpu_ib_get_value(ib, idx + 7);
                        break;

                case 0x03000001: /* encode */
                        r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
                                                   0, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
                                                   0, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000001: /* context buffer */
                        r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
                                                   0, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_ib_get_value(ib, idx + 4);
                        r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
                                                   tmp, bs_idx);
                        if (r)
                                goto out;
                        break;

                case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
                                                   4096, fb_idx);
                        if (r)
                                goto out;
                        break;

                case 0x0500000d: /* MV buffer */
                        r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
                                                   0, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
                                                   0, 0);
                        if (r)
                                goto out;
                        break;
                }

                idx += len / 4;
        }

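        /* Second pass: track session create/destroy state and patch the
         * command stream with the real buffer addresses.
         */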
        for (idx = 0; idx < ib->length_dw;) {
                uint32_t len = amdgpu_ib_get_value(ib, idx);
                uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

                switch (cmd) {
                case 0x00000001: /* session */
                        handle = amdgpu_ib_get_value(ib, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
                        if (session_idx < 0) {
                                r = session_idx;
                                goto out;
                        }
                        size = &p->adev->vce.img_size[session_idx];
                        break;

                case 0x00000002: /* task info */
                        fb_idx = amdgpu_ib_get_value(ib, idx + 6);
                        bs_idx = amdgpu_ib_get_value(ib, idx + 7);
                        break;

                case 0x01000001: /* create */
                        created |= 1 << session_idx;
                        if (destroyed & (1 << session_idx)) {
                                destroyed &= ~(1 << session_idx);
                                allocated |= 1 << session_idx;
                        } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
                        }

                        *size = amdgpu_ib_get_value(ib, idx + 8) *
                                amdgpu_ib_get_value(ib, idx + 10) *
                                8 * 3 / 2;
                        break;

                case 0x04000001: /* config extension */
                case 0x04000002: /* pic control */
                case 0x04000005: /* rate control */
                case 0x04000007: /* motion estimation */
                case 0x04000008: /* rdo */
                case 0x04000009: /* vui */
                case 0x05000002: /* auxiliary buffer */
                case 0x05000009: /* clock table */
                        break;

                case 0x0500000c: /* hw config */
                        switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
                        case CHIP_KAVERI:
                        case CHIP_MULLINS:
#endif
                        case CHIP_CARRIZO:
                                break;
                        default:
                                r = -EINVAL;
                                goto out;
                        }
                        break;

                case 0x03000001: /* encode */
                        r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
                                                *size, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
                                                *size / 3, 0);
                        if (r)
                                goto out;
                        break;

                case 0x02000001: /* destroy */
                        destroyed |= 1 << session_idx;
                        break;

                case 0x05000001: /* context buffer */
                        r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
                                                *size * 2, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_ib_get_value(ib, idx + 4);
                        r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
                                                tmp, bs_idx);
                        if (r)
                                goto out;
                        break;

                case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
                                                4096, fb_idx);
                        if (r)
                                goto out;
                        break;

                case 0x0500000d: /* MV buffer */
                        r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
                                                idx + 2, *size, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
                                                idx + 7, *size / 12, 0);
                        if (r)
                                goto out;
                        break;

                default:
                        DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
                        r = -EINVAL;
                        goto out;
                }

                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
                        r = -EINVAL;
                        goto out;
                }

                idx += len / 4;
        }

        if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }

out:
        if (!r) {
                /* No error, free all destroyed handle slots */
                tmp = destroyed;
        } else {
                /* Error during parsing, free all allocated handle slots */
                tmp = allocated;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (tmp & (1 << i))
                        atomic_set(&p->adev->vce.handles[i], 0);

        return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib)
{
        int session_idx = -1;
        uint32_t destroyed = 0;
        uint32_t created = 0;
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        int i, r = 0, idx = 0;

        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_ib_get_value(ib, idx);
                uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
                        r = -EINVAL;
                        goto out;
                }

                switch (cmd) {
                case 0x00000001: /* session */
                        handle = amdgpu_ib_get_value(ib, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
                        if (session_idx < 0) {
                                r = session_idx;
                                goto out;
                        }
                        break;

                case 0x01000001: /* create */
                        created |= 1 << session_idx;
                        if (destroyed & (1 << session_idx)) {
                                destroyed &= ~(1 << session_idx);
                                allocated |= 1 << session_idx;
                        } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
                        }
                        break;

                case 0x02000001: /* destroy */
                        destroyed |= 1 << session_idx;
                        break;

                default:
                        break;
                }

                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
                        r = -EINVAL;
                        goto out;
                }

                idx += len / 4;
        }

        if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }

out:
        if (!r) {
                /* No error, free all destroyed handle slots */
                tmp = destroyed;
                amdgpu_ib_free(p->adev, ib, NULL);
        } else {
                /* Error during parsing, free all allocated handle slots */
                tmp = allocated;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (tmp & (1 << i))
                        atomic_set(&p->adev->vce.handles[i], 0);

        return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
                                uint32_t flags)
{
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, VCE_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, VCE_CMD_TRAP);
        amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r, timeout = adev->usec_timeout;

        /* skip ring test for sriov */
        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

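        /* Emit a single END command and wait for the read pointer to move;
         * if the engine consumes the command, the ring is alive.
         */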
        amdgpu_ring_write(ring, VCE_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        /* skip vce ring1/2 ib test for now, since it's not reliable */
        if (ring != &ring->adev->vce.ring[0])
                return 0;

        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
        switch (ring) {
        case 0:
                return AMDGPU_RING_PRIO_0;
        case 1:
                return AMDGPU_RING_PRIO_1;
        case 2:
                return AMDGPU_RING_PRIO_2;
        default:
                return AMDGPU_RING_PRIO_0;
        }
}