/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

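	/* ucode_version packs major in bits 31:20, minor in bits 19:8 and the binary id in bits 7:0 */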
	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

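	/* only gate VCE once no ring has fences still outstanding */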
	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
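	/* if no idle work was pending, VCE is currently powered down and needs ungating */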
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

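	/* firmware with major version 52 or newer expects the longer create command */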
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
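	/* pick a pfn window for the BO placement that keeps the access below the 4GB mark */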
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

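	/* first pass: validate that every referenced BO can be placed below the 4GB boundary */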
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

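	/* second pass: validate the session handles and patch in the real buffer addresses */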
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

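	/* submit a single END command and wait for the read pointer to advance past it */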
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}