/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <[email protected]>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT        msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE        "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI         "amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI         "amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII         "amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS        "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA          "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO        "amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI           "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY         "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10      "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11      "amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12      "amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM          "amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10         "amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12         "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20         "amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
        const char *fw_name;
        const struct common_firmware_header *hdr;
        unsigned ucode_version, version_major, version_minor, binary_id;
        int i, r;

        switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_BONAIRE:
                fw_name = FIRMWARE_BONAIRE;
                break;
        case CHIP_KAVERI:
                fw_name = FIRMWARE_KAVERI;
                break;
        case CHIP_KABINI:
                fw_name = FIRMWARE_KABINI;
                break;
        case CHIP_HAWAII:
                fw_name = FIRMWARE_HAWAII;
                break;
        case CHIP_MULLINS:
                fw_name = FIRMWARE_MULLINS;
                break;
#endif
        case CHIP_TONGA:
                fw_name = FIRMWARE_TONGA;
                break;
        case CHIP_CARRIZO:
                fw_name = FIRMWARE_CARRIZO;
                break;
        case CHIP_FIJI:
                fw_name = FIRMWARE_FIJI;
                break;
        case CHIP_STONEY:
                fw_name = FIRMWARE_STONEY;
                break;
        case CHIP_POLARIS10:
                fw_name = FIRMWARE_POLARIS10;
                break;
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
        case CHIP_POLARIS12:
                fw_name = FIRMWARE_POLARIS12;
                break;
        case CHIP_VEGAM:
                fw_name = FIRMWARE_VEGAM;
                break;
        case CHIP_VEGA10:
                fw_name = FIRMWARE_VEGA10;
                break;
        case CHIP_VEGA12:
                fw_name = FIRMWARE_VEGA12;
                break;
        case CHIP_VEGA20:
                fw_name = FIRMWARE_VEGA20;
                break;
        default:
                return -EINVAL;
        }

        r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
        if (r) {
                dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
                        fw_name);
                return r;
        }

        r = amdgpu_ucode_validate(adev->vce.fw);
        if (r) {
                dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->vce.fw);
                adev->vce.fw = NULL;
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vce.fw->data;

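        /* ucode_version packs major in bits 31:20, minor in 19:8, binary ID in 7:0 */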
        ucode_version = le32_to_cpu(hdr->ucode_version);
        version_major = (ucode_version >> 20) & 0xfff;
        version_minor = (ucode_version >> 8) & 0xfff;
        binary_id = ucode_version & 0xff;
        DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
                version_major, version_minor, binary_id);
        adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
                                (binary_id << 8));

        r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
                                    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
                return r;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                atomic_set(&adev->vce.handles[i], 0);
                adev->vce.filp[i] = NULL;
        }

        INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
        mutex_init(&adev->vce.idle_mutex);

        return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
        unsigned i;

        if (adev->vce.vcpu_bo == NULL)
                return 0;

        drm_sched_entity_destroy(&adev->vce.entity);

        amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
                (void **)&adev->vce.cpu_addr);

        for (i = 0; i < adev->vce.num_rings; i++)
                amdgpu_ring_fini(&adev->vce.ring[i]);

        release_firmware(adev->vce.fw);
        mutex_destroy(&adev->vce.idle_mutex);

        return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        struct drm_gpu_scheduler *sched;
        int r;

        ring = &adev->vce.ring[0];
        sched = &ring->sched;
        r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
                                  &sched, 1, NULL);
        if (r != 0) {
                DRM_ERROR("Failed setting up VCE run queue.\n");
                return r;
        }

        return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
        int i;

        cancel_delayed_work_sync(&adev->vce.idle_work);

        if (adev->vce.vcpu_bo == NULL)
                return 0;

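        /* check whether any encode sessions are still open */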
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (atomic_read(&adev->vce.handles[i]))
                        break;

        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;

        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
        void *cpu_addr;
        const struct common_firmware_header *hdr;
        unsigned offset;
        int r, idx;

        if (adev->vce.vcpu_bo == NULL)
                return -EINVAL;

        r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
                return r;
        }

        r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
        if (r) {
                amdgpu_bo_unreserve(adev->vce.vcpu_bo);
                dev_err(adev->dev, "(%d) VCE map failed\n", r);
                return r;
        }

        hdr = (const struct common_firmware_header *)adev->vce.fw->data;
        offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

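        /* drm_dev_enter() skips the firmware copy if the device was already unplugged */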
        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
                            adev->vce.fw->size - offset);
                drm_dev_exit(idx);
        }

        amdgpu_bo_kunmap(adev->vce.vcpu_bo);

        amdgpu_bo_unreserve(adev->vce.vcpu_bo);

        return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vce.idle_work.work);
        unsigned i, count = 0;

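        /* count fences emitted but not yet signaled on all VCE rings */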
        for (i = 0; i < adev->vce.num_rings; i++)
                count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

        if (count == 0) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, false);
                } else {
                        amdgpu_asic_set_vce_clocks(adev, 0, 0);
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_PG_STATE_GATE);
                        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_CG_STATE_GATE);
                }
        } else {
                schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
        }
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        bool set_clocks;

        if (amdgpu_sriov_vf(adev))
                return;

        mutex_lock(&adev->vce.idle_mutex);
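        /* if no idle work was pending, the block may be powered down already */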
        set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_vce(adev, true);
                } else {
                        amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
                        amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_CG_STATE_UNGATE);
                        amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
                                                               AMD_PG_STATE_UNGATE);
                }
        }
        mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
        if (!amdgpu_sriov_vf(ring->adev))
                schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
        struct amdgpu_ring *ring = &adev->vce.ring[0];
        int i, r;

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                uint32_t handle = atomic_read(&adev->vce.handles[i]);

                if (!handle || adev->vce.filp[i] != filp)
                        continue;

                r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
                if (r)
                        DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

                adev->vce.filp[i] = NULL;
                atomic_set(&adev->vce.handles[i], 0);
        }
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                                     struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct amdgpu_ib ib_msg;
        struct dma_fence *f = NULL;
        uint64_t addr;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                     AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;

        memset(&ib_msg, 0, sizeof(ib_msg));
        /* only one gpu page is needed, alloc +1 page to make addr aligned. */
        r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
                          AMDGPU_IB_POOL_DIRECT,
                          &ib_msg);
        if (r)
                goto err;

        ib = &job->ibs[0];
        /* let addr point to page boundary */
        addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

        /* stitch together a VCE create msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

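        /* firmware major version 52 and newer expects a longer create command */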
        if ((ring->adev->vce.fw_version >> 24) >= 52)
                ib->ptr[ib->length_dw++] = 0x00000040; /* len */
        else
                ib->ptr[ib->length_dw++] = 0x00000030; /* len */
        ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000042;
        ib->ptr[ib->length_dw++] = 0x0000000a;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = 0x00000080;
        ib->ptr[ib->length_dw++] = 0x00000060;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x00000100;
        ib->ptr[ib->length_dw++] = 0x0000000c;
        ib->ptr[ib->length_dw++] = 0x00000000;
        if ((ring->adev->vce.fw_version >> 24) >= 52) {
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
                ib->ptr[ib->length_dw++] = 0x00000000;
        }

        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
        ib->ptr[ib->length_dw++] = upper_32_bits(addr);
        ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x00000001;

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        r = amdgpu_job_submit_direct(job, ring, &f);
        amdgpu_ib_free(ring->adev, &ib_msg, f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence)
{
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
        int i, r;

        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
                                     direct ? AMDGPU_IB_POOL_DIRECT :
                                     AMDGPU_IB_POOL_DELAYED, &job);
        if (r)
                return r;

        ib = &job->ibs[0];

        /* stitch together a VCE destroy msg */
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
        ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
        ib->ptr[ib->length_dw++] = handle;

        ib->ptr[ib->length_dw++] = 0x00000020; /* len */
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
        ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
        ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0x00000000;
        ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
        ib->ptr[ib->length_dw++] = 0x00000000;

        ib->ptr[ib->length_dw++] = 0x00000008; /* len */
        ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

        for (i = ib->length_dw; i < ib_size_dw; ++i)
                ib->ptr[i] = 0x0;

        if (direct)
                r = amdgpu_job_submit_direct(job, ring, &f);
        else
                r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
        if (r)
                goto err;

        if (fence)
                *fence = dma_fence_get(f);
        dma_fence_put(f);
        return 0;

err:
        amdgpu_job_free(job);
        return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
                                  int lo, int hi, unsigned size, int32_t index)
{
        int64_t offset = ((uint64_t)size) * ((int64_t)index);
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_bo_va_mapping *mapping;
        unsigned i, fpfn, lpfn;
        struct amdgpu_bo *bo;
        uint64_t addr;
        int r;

        addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
               ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
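        /* restrict the placement window so the accessed range stays below 4GB */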
        if (index >= 0) {
                addr += offset;
                fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
                lpfn = 0x100000000ULL >> PAGE_SHIFT;
        } else {
                fpfn = 0;
                lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
        }

        r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
        if (r) {
                DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
                          addr, lo, hi, size, index);
                return r;
        }

        for (i = 0; i < bo->placement.num_placement; ++i) {
                bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
                bo->placements[i].lpfn = bo->placements[i].lpfn ?
                        min(bo->placements[i].lpfn, lpfn) : lpfn;
        }
        return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
                               int lo, int hi, unsigned size, uint32_t index)
{
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_bo *bo;
        uint64_t addr;
        int r;

        if (index == 0xffffffff)
                index = 0;

        addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
               ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
        addr += ((uint64_t)size) * ((uint64_t)index);

        r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
        if (r) {
                DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
                          addr, lo, hi, size, index);
                return r;
        }

        if ((addr + (uint64_t)size) >
            (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
                          addr, lo, hi);
                return -EINVAL;
        }

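        /* rebase the VA onto the BO's GPU offset, then strip the index offset again */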
        addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
        addr += amdgpu_bo_gpu_offset(bo);
        addr -= ((uint64_t)size) * ((uint64_t)index);

        amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
        amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

        return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * when we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
                                      uint32_t handle, uint32_t *allocated)
{
        unsigned i;

        /* validate the handle */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (atomic_read(&p->adev->vce.handles[i]) == handle) {
                        if (p->adev->vce.filp[i] != p->filp) {
                                DRM_ERROR("VCE handle collision detected!\n");
                                return -EINVAL;
                        }
                        return i;
                }
        }

        /* handle not found, try to alloc a new one */
        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
                if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
                        p->adev->vce.filp[i] = p->filp;
                        p->adev->vce.img_size[i] = 0;
                        *allocated |= 1 << i;
                        return i;
                }
        }

        DRM_ERROR("No more free VCE handles!\n");
        return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
        struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
        unsigned fb_idx = 0, bs_idx = 0;
        int session_idx = -1;
        uint32_t destroyed = 0;
        uint32_t created = 0;
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
        unsigned idx;
        int i, r = 0;

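        /* run the job without a VM; the relocations below patch in real addresses */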
        p->job->vm = NULL;
        ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

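        /* first pass: validate all BOs and restrict their placement */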
        for (idx = 0; idx < ib->length_dw;) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
                uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
                        r = -EINVAL;
                        goto out;
                }

                switch (cmd) {
                case 0x00000002: /* task info */
                        fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
                        bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
                        break;

                case 0x03000001: /* encode */
                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
                                                   idx + 9, 0, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
                                                   idx + 11, 0, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000001: /* context buffer */
                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
                                                   idx + 2, 0, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
                                                   tmp, bs_idx);
                        if (r)
                                goto out;
                        break;

                case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
                                                   4096, fb_idx);
                        if (r)
                                goto out;
                        break;

                case 0x0500000d: /* MV buffer */
                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
                                                        idx + 2, 0, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
                                                        idx + 7, 0, 0);
                        if (r)
                                goto out;
                        break;
                }

                idx += len / 4;
        }

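        /* second pass: track session state and patch the relocations */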
        for (idx = 0; idx < ib->length_dw;) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
                uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

                switch (cmd) {
                case 0x00000001: /* session */
                        handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
                        if (session_idx < 0) {
                                r = session_idx;
                                goto out;
                        }
                        size = &p->adev->vce.img_size[session_idx];
                        break;

                case 0x00000002: /* task info */
                        fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
                        bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
                        break;

                case 0x01000001: /* create */
                        created |= 1 << session_idx;
                        if (destroyed & (1 << session_idx)) {
                                destroyed &= ~(1 << session_idx);
                                allocated |= 1 << session_idx;

                        } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
                        }

                        *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
                                amdgpu_get_ib_value(p, ib_idx, idx + 10) *
                                8 * 3 / 2;
                        break;

                case 0x04000001: /* config extension */
                case 0x04000002: /* pic control */
                case 0x04000005: /* rate control */
                case 0x04000007: /* motion estimation */
                case 0x04000008: /* rdo */
                case 0x04000009: /* vui */
                case 0x05000002: /* auxiliary buffer */
                case 0x05000009: /* clock table */
                        break;

                case 0x0500000c: /* hw config */
                        switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
                        case CHIP_KAVERI:
                        case CHIP_MULLINS:
#endif
                        case CHIP_CARRIZO:
                                break;
                        default:
                                r = -EINVAL;
                                goto out;
                        }
                        break;

                case 0x03000001: /* encode */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
                                                *size, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
                                                *size / 3, 0);
                        if (r)
                                goto out;
                        break;

                case 0x02000001: /* destroy */
                        destroyed |= 1 << session_idx;
                        break;

                case 0x05000001: /* context buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                *size * 2, 0);
                        if (r)
                                goto out;
                        break;

                case 0x05000004: /* video bitstream buffer */
                        tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                tmp, bs_idx);
                        if (r)
                                goto out;
                        break;

                case 0x05000005: /* feedback buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
                                                4096, fb_idx);
                        if (r)
                                goto out;
                        break;

                case 0x0500000d: /* MV buffer */
                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
                                                        idx + 2, *size, 0);
                        if (r)
                                goto out;

                        r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
                                                        idx + 7, *size / 12, 0);
                        if (r)
                                goto out;
                        break;

                default:
                        DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
                        r = -EINVAL;
                        goto out;
                }

                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
                        r = -EINVAL;
                        goto out;
                }

                idx += len / 4;
        }

        if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }

out:
        if (!r) {
                /* No error, free all destroyed handle slots */
                tmp = destroyed;
        } else {
                /* Error during parsing, free all allocated handle slots */
                tmp = allocated;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (tmp & (1 << i))
                        atomic_set(&p->adev->vce.handles[i], 0);

        return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
        struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
        int session_idx = -1;
        uint32_t destroyed = 0;
        uint32_t created = 0;
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        int i, r = 0, idx = 0;

        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
                uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
                        r = -EINVAL;
                        goto out;
                }

                switch (cmd) {
                case 0x00000001: /* session */
                        handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
                        session_idx = amdgpu_vce_validate_handle(p, handle,
                                                                 &allocated);
                        if (session_idx < 0) {
                                r = session_idx;
                                goto out;
                        }
                        break;

                case 0x01000001: /* create */
                        created |= 1 << session_idx;
                        if (destroyed & (1 << session_idx)) {
                                destroyed &= ~(1 << session_idx);
                                allocated |= 1 << session_idx;

                        } else if (!(allocated & (1 << session_idx))) {
                                DRM_ERROR("Handle already in use!\n");
                                r = -EINVAL;
                                goto out;
                        }

                        break;

                case 0x02000001: /* destroy */
                        destroyed |= 1 << session_idx;
                        break;

                default:
                        break;
                }

                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
                        r = -EINVAL;
                        goto out;
                }

                idx += len / 4;
        }

        if (allocated & ~created) {
                DRM_ERROR("New session without create command!\n");
                r = -ENOENT;
        }

out:
        if (!r) {
                /* No error, free all destroyed handle slots */
                tmp = destroyed;
                amdgpu_ib_free(p->adev, ib, NULL);
        } else {
                /* Error during parsing, free all allocated handle slots */
                tmp = allocated;
        }

        for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
                if (tmp & (1 << i))
                        atomic_set(&p->adev->vce.handles[i], 0);

        return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
                                uint32_t flags)
{
        amdgpu_ring_write(ring, VCE_CMD_IB);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags)
{
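        /* VCE fences carry only a 32-bit sequence number */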
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, VCE_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, VCE_CMD_TRAP);
        amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t rptr;
        unsigned i;
        int r, timeout = adev->usec_timeout;

        /* skip ring test for sriov */
        if (amdgpu_sriov_vf(adev))
                return 0;

        r = amdgpu_ring_alloc(ring, 16);
        if (r)
                return r;

        rptr = amdgpu_ring_get_rptr(ring);

        amdgpu_ring_write(ring, VCE_CMD_END);
        amdgpu_ring_commit(ring);

        for (i = 0; i < timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
                udelay(1);
        }

        if (i >= timeout)
                r = -ETIMEDOUT;

        return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct dma_fence *fence = NULL;
        long r;

        /* skip vce ring1/2 ib test for now, since it's not reliable */
        if (ring != &ring->adev->vce.ring[0])
                return 0;

        r = amdgpu_vce_get_create_msg(ring, 1, NULL);
        if (r)
                goto error;

        r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
        if (r)
                goto error;

        r = dma_fence_wait_timeout(fence, false, timeout);
        if (r == 0)
                r = -ETIMEDOUT;
        else if (r > 0)
                r = 0;

error:
        dma_fence_put(fence);
        return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
        switch (ring) {
        case 0:
                return AMDGPU_RING_PRIO_0;
        case 1:
                return AMDGPU_RING_PRIO_1;
        case 2:
                return AMDGPU_RING_PRIO_2;
        default:
                return AMDGPU_RING_PRIO_0;
        }
}