drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

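/**
 * amdgpu_unregister_gpu_instance - remove a device from the mgpu table
 *
 * @adev: amdgpu_device pointer
 *
 * Removes @adev from the global mgpu_info instance list and updates the
 * APU/dGPU counts accordingly.
 */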
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
        struct amdgpu_gpu_instance *gpu_instance;
        int i;

        mutex_lock(&mgpu_info.mutex);

        for (i = 0; i < mgpu_info.num_gpu; i++) {
                gpu_instance = &(mgpu_info.gpu_ins[i]);
                if (gpu_instance->adev == adev) {
                        mgpu_info.gpu_ins[i] =
                                mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
                        mgpu_info.num_gpu--;
                        if (adev->flags & AMD_IS_APU)
                                mgpu_info.num_apu--;
                        else
                                mgpu_info.num_dgpu--;
                        break;
                }
        }

        mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
        struct amdgpu_device *adev = drm_to_adev(dev);

        if (adev == NULL)
                return;

        amdgpu_unregister_gpu_instance(adev);

        if (adev->rmmio == NULL)
                return;

        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
                DRM_WARN("smart shift update failed\n");

        amdgpu_acpi_fini(adev);
        amdgpu_device_fini_hw(adev);
}

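/**
 * amdgpu_register_gpu_instance - add a device to the mgpu table
 *
 * @adev: amdgpu_device pointer
 *
 * Adds @adev to the global mgpu_info instance list (up to MAX_GPU_INSTANCE
 * entries) and updates the APU/dGPU counts accordingly.
 */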
void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
        struct amdgpu_gpu_instance *gpu_instance;

        mutex_lock(&mgpu_info.mutex);

        if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
                DRM_ERROR("Cannot register more gpu instance\n");
                mutex_unlock(&mgpu_info.mutex);
                return;
        }

        gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
        gpu_instance->adev = adev;
        gpu_instance->mgpu_fan_enabled = 0;

        mgpu_info.num_gpu++;
        if (adev->flags & AMD_IS_APU)
                mgpu_info.num_apu++;
        else
                mgpu_info.num_dgpu++;

        mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @adev: pointer to struct amdgpu_device
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
        struct drm_device *dev;
        int r, acpi_status;

        dev = adev_to_drm(adev);

        /* amdgpu_device_init() should report only fatal errors, such as
         * memory allocation, iomapping or memory manager initialization
         * failures; it must properly initialize the GPU MC controller
         * and permit VRAM allocation.
         */
        r = amdgpu_device_init(adev, flags);
        if (r) {
                dev_err(dev->dev, "Fatal error during GPU init\n");
                goto out;
        }

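        /* Pick a runtime PM mode: PX (ATPX), BOCO or BACO, depending on
         * what the platform supports and the runpm module parameter.
         */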
        adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
        if (amdgpu_device_supports_px(dev) &&
            (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
                adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
                dev_info(adev->dev, "Using ATPX for runtime pm\n");
        } else if (amdgpu_device_supports_boco(dev) &&
                   (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
                adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
                dev_info(adev->dev, "Using BOCO for runtime pm\n");
        } else if (amdgpu_device_supports_baco(dev) &&
                   (amdgpu_runtime_pm != 0)) {
                switch (adev->asic_type) {
                case CHIP_VEGA20:
                case CHIP_ARCTURUS:
                        /* enable BACO as runpm mode if runpm=1 */
                        if (amdgpu_runtime_pm > 0)
                                adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
                        break;
                case CHIP_VEGA10:
                        /* enable BACO as runpm mode if noretry=0 */
                        if (!adev->gmc.noretry)
                                adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
                        break;
                default:
                        /* enable BACO as runpm mode on CI+ */
                        adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
                        break;
                }

                if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
                        dev_info(adev->dev, "Using BACO for runtime pm\n");
        }

        /* Call ACPI methods: they require modeset init,
         * but failure is not fatal
         */

        acpi_status = amdgpu_acpi_init(adev);
        if (acpi_status)
                dev_dbg(dev->dev, "Error during ACPI methods call\n");

        if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
                DRM_WARN("smart shift update failed\n");

out:
        if (r)
                amdgpu_driver_unload_kms(dev);

        return r;
}

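/*
 * amdgpu_firmware_info - fill in version/feature info for one firmware type
 *
 * Translates an AMDGPU_INFO_FW_* query into the matching firmware and
 * feature versions cached in the amdgpu_device structure.
 * Returns 0 on success, -EINVAL for an unknown type or index.
 */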
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                                struct drm_amdgpu_query_fw *query_fw,
                                struct amdgpu_device *adev)
{
        switch (query_fw->fw_type) {
        case AMDGPU_INFO_FW_VCE:
                fw_info->ver = adev->vce.fw_version;
                fw_info->feature = adev->vce.fb_version;
                break;
        case AMDGPU_INFO_FW_UVD:
                fw_info->ver = adev->uvd.fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_VCN:
                fw_info->ver = adev->vcn.fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_GMC:
                fw_info->ver = adev->gmc.fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_GFX_ME:
                fw_info->ver = adev->gfx.me_fw_version;
                fw_info->feature = adev->gfx.me_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_PFP:
                fw_info->ver = adev->gfx.pfp_fw_version;
                fw_info->feature = adev->gfx.pfp_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_CE:
                fw_info->ver = adev->gfx.ce_fw_version;
                fw_info->feature = adev->gfx.ce_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLC:
                fw_info->ver = adev->gfx.rlc_fw_version;
                fw_info->feature = adev->gfx.rlc_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
                fw_info->ver = adev->gfx.rlc_srlc_fw_version;
                fw_info->feature = adev->gfx.rlc_srlc_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
                fw_info->ver = adev->gfx.rlc_srlg_fw_version;
                fw_info->feature = adev->gfx.rlc_srlg_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
                fw_info->ver = adev->gfx.rlc_srls_fw_version;
                fw_info->feature = adev->gfx.rlc_srls_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLCP:
                fw_info->ver = adev->gfx.rlcp_ucode_version;
                fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_RLCV:
                fw_info->ver = adev->gfx.rlcv_ucode_version;
                fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
                break;
        case AMDGPU_INFO_FW_GFX_MEC:
                if (query_fw->index == 0) {
                        fw_info->ver = adev->gfx.mec_fw_version;
                        fw_info->feature = adev->gfx.mec_feature_version;
                } else if (query_fw->index == 1) {
                        fw_info->ver = adev->gfx.mec2_fw_version;
                        fw_info->feature = adev->gfx.mec2_feature_version;
                } else
                        return -EINVAL;
                break;
        case AMDGPU_INFO_FW_SMC:
                fw_info->ver = adev->pm.fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_TA:
                switch (query_fw->index) {
                case TA_FW_TYPE_PSP_XGMI:
                        fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
                        fw_info->feature = adev->psp.xgmi_context.context
                                                   .bin_desc.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAS:
                        fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
                        fw_info->feature = adev->psp.ras_context.context
                                                   .bin_desc.feature_version;
                        break;
                case TA_FW_TYPE_PSP_HDCP:
                        fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
                        fw_info->feature = adev->psp.hdcp_context.context
                                                   .bin_desc.feature_version;
                        break;
                case TA_FW_TYPE_PSP_DTM:
                        fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
                        fw_info->feature = adev->psp.dtm_context.context
                                                   .bin_desc.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAP:
                        fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
                        fw_info->feature = adev->psp.rap_context.context
                                                   .bin_desc.feature_version;
                        break;
                case TA_FW_TYPE_PSP_SECUREDISPLAY:
                        fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
                        fw_info->feature =
                                adev->psp.securedisplay_context.context.bin_desc
                                        .feature_version;
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case AMDGPU_INFO_FW_SDMA:
                if (query_fw->index >= adev->sdma.num_instances)
                        return -EINVAL;
                fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
                fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
                break;
        case AMDGPU_INFO_FW_SOS:
                fw_info->ver = adev->psp.sos.fw_version;
                fw_info->feature = adev->psp.sos.feature_version;
                break;
        case AMDGPU_INFO_FW_ASD:
                fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
                fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
                break;
        case AMDGPU_INFO_FW_DMCU:
                fw_info->ver = adev->dm.dmcu_fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_DMCUB:
                fw_info->ver = adev->dm.dmcub_fw_version;
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_TOC:
                fw_info->ver = adev->psp.toc.fw_version;
                fw_info->feature = adev->psp.toc.feature_version;
                break;
        case AMDGPU_INFO_FW_CAP:
                fw_info->ver = adev->psp.cap_fw_version;
                fw_info->feature = adev->psp.cap_feature_version;
                break;
        case AMDGPU_INFO_FW_MES_KIQ:
                fw_info->ver = adev->mes.ucode_fw_version[0];
                fw_info->feature = 0;
                break;
        case AMDGPU_INFO_FW_MES:
                fw_info->ver = adev->mes.ucode_fw_version[1];
                fw_info->feature = 0;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

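/*
 * amdgpu_hw_ip_info - report capabilities of one hardware IP block
 *
 * Counts the rings that are ready for the requested IP type and fills in
 * version, alignment and ring-availability information for userspace.
 * Returns 0 on success, -EINVAL for an invalid IP type or instance.
 */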
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
                             struct drm_amdgpu_info *info,
                             struct drm_amdgpu_info_hw_ip *result)
{
        uint32_t ib_start_alignment = 0;
        uint32_t ib_size_alignment = 0;
        enum amd_ip_block_type type;
        unsigned int num_rings = 0;
        unsigned int i, j;

        if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                return -EINVAL;

        switch (info->query_hw_ip.type) {
        case AMDGPU_HW_IP_GFX:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                        if (adev->gfx.gfx_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
                break;
        case AMDGPU_HW_IP_COMPUTE:
                type = AMD_IP_BLOCK_TYPE_GFX;
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
                        if (adev->gfx.compute_ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 32;
                ib_size_alignment = 32;
                break;
        case AMDGPU_HW_IP_DMA:
                type = AMD_IP_BLOCK_TYPE_SDMA;
                for (i = 0; i < adev->sdma.num_instances; i++)
                        if (adev->sdma.instance[i].ring.sched.ready)
                                ++num_rings;
                ib_start_alignment = 256;
                ib_size_alignment = 4;
                break;
        case AMDGPU_HW_IP_UVD:
                type = AMD_IP_BLOCK_TYPE_UVD;
                for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;

                        if (adev->uvd.inst[i].ring.sched.ready)
                                ++num_rings;
                }
                ib_start_alignment = 64;
                ib_size_alignment = 64;
                break;
        case AMDGPU_HW_IP_VCE:
                type = AMD_IP_BLOCK_TYPE_VCE;
                for (i = 0; i < adev->vce.num_rings; i++)
                        if (adev->vce.ring[i].sched.ready)
                                ++num_rings;
                ib_start_alignment = 4;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_UVD_ENC:
                type = AMD_IP_BLOCK_TYPE_UVD;
                for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;

                        for (j = 0; j < adev->uvd.num_enc_rings; j++)
                                if (adev->uvd.inst[i].ring_enc[j].sched.ready)
                                        ++num_rings;
                }
                ib_start_alignment = 64;
                ib_size_alignment = 64;
                break;
        case AMDGPU_HW_IP_VCN_DEC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;

                        if (adev->vcn.inst[i].ring_dec.sched.ready)
                                ++num_rings;
                }
                ib_start_alignment = 16;
                ib_size_alignment = 16;
                break;
        case AMDGPU_HW_IP_VCN_ENC:
                type = AMD_IP_BLOCK_TYPE_VCN;
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        if (adev->uvd.harvest_config & (1 << i))
                                continue;

                        for (j = 0; j < adev->vcn.num_enc_rings; j++)
                                if (adev->vcn.inst[i].ring_enc[j].sched.ready)
                                        ++num_rings;
                }
                ib_start_alignment = 64;
                ib_size_alignment = 1;
                break;
        case AMDGPU_HW_IP_VCN_JPEG:
                type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
                        AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

                for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
                        if (adev->jpeg.harvest_config & (1 << i))
                                continue;

                        if (adev->jpeg.inst[i].ring_dec.sched.ready)
                                ++num_rings;
                }
                ib_start_alignment = 16;
                ib_size_alignment = 16;
                break;
        default:
                return -EINVAL;
        }

        for (i = 0; i < adev->num_ip_blocks; i++)
                if (adev->ip_blocks[i].version->type == type &&
                    adev->ip_blocks[i].status.valid)
                        break;

        if (i == adev->num_ip_blocks)
                return 0;

        num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
                        num_rings);

        result->hw_ip_version_major = adev->ip_blocks[i].version->major;
        result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;

        if (adev->asic_type >= CHIP_VEGA10) {
                switch (type) {
                case AMD_IP_BLOCK_TYPE_GFX:
                        result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];
                        break;
                case AMD_IP_BLOCK_TYPE_SDMA:
                        result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];
                        break;
                case AMD_IP_BLOCK_TYPE_UVD:
                case AMD_IP_BLOCK_TYPE_VCN:
                case AMD_IP_BLOCK_TYPE_JPEG:
                        result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];
                        break;
                case AMD_IP_BLOCK_TYPE_VCE:
                        result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];
                        break;
                default:
                        result->ip_discovery_version = 0;
                        break;
                }
        } else {
                result->ip_discovery_version = 0;
        }
        result->capabilities_flags = 0;
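        /* expose the first num_rings ready rings to userspace as a bitmask */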
        result->available_rings = (1 << num_rings) - 1;
        result->ib_start_alignment = ib_start_alignment;
        result->ib_size_alignment = ib_size_alignment;
        return 0;
}

/*
 * Userspace get information ioctl
 */
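/*
 * Userspace reaches this ioctl through DRM_IOCTL_AMDGPU_INFO with a
 * struct drm_amdgpu_info request; results are written back through
 * request.return_pointer.  A minimal sketch of a caller (illustrative
 * only, assuming libdrm's drmCommandWrite() and an open device fd):
 *
 *      struct drm_amdgpu_info request = {};
 *      struct drm_amdgpu_info_firmware fw = {};
 *
 *      request.return_pointer = (uintptr_t)&fw;
 *      request.return_size = sizeof(fw);
 *      request.query = AMDGPU_INFO_FW_VERSION;
 *      request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
 *      if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request)) == 0)
 *              printf("ME fw version %u, feature %u\n", fw.ver, fw.feature);
 */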
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_info *info = data;
        struct amdgpu_mode_info *minfo = &adev->mode_info;
        void __user *out = (void __user *)(uintptr_t)info->return_pointer;
        uint32_t size = info->return_size;
        struct drm_crtc *crtc;
        uint32_t ui32 = 0;
        uint64_t ui64 = 0;
        int i, found;
        int ui32_size = sizeof(ui32);

        if (!info->return_size || !info->return_pointer)
                return -EINVAL;

        switch (info->query) {
        case AMDGPU_INFO_ACCEL_WORKING:
                ui32 = adev->accel_working;
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        case AMDGPU_INFO_CRTC_FROM_ID:
                for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
                        crtc = (struct drm_crtc *)minfo->crtcs[i];
                        if (crtc && crtc->base.id == info->mode_crtc.id) {
                                struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
                                ui32 = amdgpu_crtc->crtc_id;
                                found = 1;
                                break;
                        }
                }
                if (!found) {
                        DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
                        return -EINVAL;
                }
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        case AMDGPU_INFO_HW_IP_INFO: {
                struct drm_amdgpu_info_hw_ip ip = {};
                int ret;

                ret = amdgpu_hw_ip_info(adev, info, &ip);
                if (ret)
                        return ret;

                ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
                return ret ? -EFAULT : 0;
        }
        case AMDGPU_INFO_HW_IP_COUNT: {
                enum amd_ip_block_type type;
                uint32_t count = 0;

                switch (info->query_hw_ip.type) {
                case AMDGPU_HW_IP_GFX:
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        break;
                case AMDGPU_HW_IP_COMPUTE:
                        type = AMD_IP_BLOCK_TYPE_GFX;
                        break;
                case AMDGPU_HW_IP_DMA:
                        type = AMD_IP_BLOCK_TYPE_SDMA;
                        break;
                case AMDGPU_HW_IP_UVD:
                        type = AMD_IP_BLOCK_TYPE_UVD;
                        break;
                case AMDGPU_HW_IP_VCE:
                        type = AMD_IP_BLOCK_TYPE_VCE;
                        break;
                case AMDGPU_HW_IP_UVD_ENC:
                        type = AMD_IP_BLOCK_TYPE_UVD;
                        break;
                case AMDGPU_HW_IP_VCN_DEC:
                case AMDGPU_HW_IP_VCN_ENC:
                        type = AMD_IP_BLOCK_TYPE_VCN;
                        break;
                case AMDGPU_HW_IP_VCN_JPEG:
                        type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
                                AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
                        break;
                default:
                        return -EINVAL;
                }

                for (i = 0; i < adev->num_ip_blocks; i++)
                        if (adev->ip_blocks[i].version->type == type &&
                            adev->ip_blocks[i].status.valid &&
                            count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                                count++;

                return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_TIMESTAMP:
                ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_FW_VERSION: {
                struct drm_amdgpu_info_firmware fw_info;
                int ret;

                /* We only support one instance of each IP block right now. */
                if (info->query_fw.ip_instance != 0)
                        return -EINVAL;

                ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
                if (ret)
                        return ret;

                return copy_to_user(out, &fw_info,
                                    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_NUM_BYTES_MOVED:
                ui64 = atomic64_read(&adev->num_bytes_moved);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_NUM_EVICTIONS:
                ui64 = atomic64_read(&adev->num_evictions);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
                ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_VRAM_USAGE:
                ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_VIS_VRAM_USAGE:
                ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_GTT_USAGE:
                ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
                return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
        case AMDGPU_INFO_GDS_CONFIG: {
                struct drm_amdgpu_info_gds gds_info;

                memset(&gds_info, 0, sizeof(gds_info));
                gds_info.compute_partition_size = adev->gds.gds_size;
                gds_info.gds_total_size = adev->gds.gds_size;
                gds_info.gws_per_compute_partition = adev->gds.gws_size;
                gds_info.oa_per_compute_partition = adev->gds.oa_size;
                return copy_to_user(out, &gds_info,
                                    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_VRAM_GTT: {
                struct drm_amdgpu_info_vram_gtt vram_gtt;

                vram_gtt.vram_size = adev->gmc.real_vram_size -
                        atomic64_read(&adev->vram_pin_size) -
                        AMDGPU_VM_RESERVED_VRAM;
                vram_gtt.vram_cpu_accessible_size =
                        min(adev->gmc.visible_vram_size -
                            atomic64_read(&adev->visible_pin_size),
                            vram_gtt.vram_size);
                vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
                vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
                return copy_to_user(out, &vram_gtt,
                                    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_MEMORY: {
                struct drm_amdgpu_memory_info mem;
                struct ttm_resource_manager *gtt_man =
                        &adev->mman.gtt_mgr.manager;
                struct ttm_resource_manager *vram_man =
                        &adev->mman.vram_mgr.manager;

                memset(&mem, 0, sizeof(mem));
                mem.vram.total_heap_size = adev->gmc.real_vram_size;
                mem.vram.usable_heap_size = adev->gmc.real_vram_size -
                        atomic64_read(&adev->vram_pin_size) -
                        AMDGPU_VM_RESERVED_VRAM;
                mem.vram.heap_usage =
                        ttm_resource_manager_usage(vram_man);
                mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

                mem.cpu_accessible_vram.total_heap_size =
                        adev->gmc.visible_vram_size;
                mem.cpu_accessible_vram.usable_heap_size =
                        min(adev->gmc.visible_vram_size -
                            atomic64_read(&adev->visible_pin_size),
                            mem.vram.usable_heap_size);
                mem.cpu_accessible_vram.heap_usage =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
                mem.cpu_accessible_vram.max_allocation =
                        mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

                mem.gtt.total_heap_size = gtt_man->size;
                mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
                        atomic64_read(&adev->gart_pin_size);
                mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
                mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

                return copy_to_user(out, &mem,
                                    min((size_t)size, sizeof(mem)))
                                    ? -EFAULT : 0;
        }
        case AMDGPU_INFO_READ_MMR_REG: {
                unsigned n, alloc_size;
                uint32_t *regs;
                unsigned se_num = (info->read_mmr_reg.instance >>
                                   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
                                  AMDGPU_INFO_MMR_SE_INDEX_MASK;
                unsigned sh_num = (info->read_mmr_reg.instance >>
                                   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
                                  AMDGPU_INFO_MMR_SH_INDEX_MASK;

                /* set full masks if userspace set all bits
                 * in the bitfields */
                if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
                        se_num = 0xffffffff;
                else if (se_num >= AMDGPU_GFX_MAX_SE)
                        return -EINVAL;
                if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
                        sh_num = 0xffffffff;
                else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
                        return -EINVAL;

                if (info->read_mmr_reg.count > 128)
                        return -EINVAL;

                regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
                if (!regs)
                        return -ENOMEM;
                alloc_size = info->read_mmr_reg.count * sizeof(*regs);

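                /* keep GFX powered up (no GFXOFF) while reading the registers */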
                amdgpu_gfx_off_ctrl(adev, false);
                for (i = 0; i < info->read_mmr_reg.count; i++) {
                        if (amdgpu_asic_read_register(adev, se_num, sh_num,
                                                      info->read_mmr_reg.dword_offset + i,
                                                      &regs[i])) {
                                DRM_DEBUG_KMS("unallowed offset %#x\n",
                                              info->read_mmr_reg.dword_offset + i);
                                kfree(regs);
                                amdgpu_gfx_off_ctrl(adev, true);
                                return -EFAULT;
                        }
                }
                amdgpu_gfx_off_ctrl(adev, true);
                n = copy_to_user(out, regs, min(size, alloc_size));
                kfree(regs);
                return n ? -EFAULT : 0;
        }
        case AMDGPU_INFO_DEV_INFO: {
                struct drm_amdgpu_info_device *dev_info;
                uint64_t vm_size;
                int ret;

                dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
                if (!dev_info)
                        return -ENOMEM;

                dev_info->device_id = adev->pdev->device;
                dev_info->chip_rev = adev->rev_id;
                dev_info->external_rev = adev->external_rev_id;
                dev_info->pci_rev = adev->pdev->revision;
                dev_info->family = adev->family;
                dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
                dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
                /* return all clocks in KHz */
                dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
                if (adev->pm.dpm_enabled) {
                        dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
                        dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
                } else {
                        dev_info->max_engine_clock = adev->clock.default_sclk * 10;
                        dev_info->max_memory_clock = adev->clock.default_mclk * 10;
                }
                dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
                dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
                        adev->gfx.config.max_shader_engines;
                dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
                dev_info->_pad = 0;
                dev_info->ids_flags = 0;
                if (adev->flags & AMD_IS_APU)
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
                if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
                if (amdgpu_is_tmz(adev))
                        dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;

                vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
                vm_size -= AMDGPU_VA_RESERVED_SIZE;

                /* Older VCE FW versions are buggy and can handle only 40 bits */
                if (adev->vce.fw_version &&
                    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
                        vm_size = min(vm_size, 1ULL << 40);

                dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
                dev_info->virtual_address_max =
                        min(vm_size, AMDGPU_GMC_HOLE_START);

                if (vm_size > AMDGPU_GMC_HOLE_START) {
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
                dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
                dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
                memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
                       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
                memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
                       sizeof(adev->gfx.cu_info.bitmap));
                dev_info->vram_type = adev->gmc.vram_type;
                dev_info->vram_bit_width = adev->gmc.vram_width;
                dev_info->vce_harvest_config = adev->vce.harvest_config;
                dev_info->gc_double_offchip_lds_buf =
                        adev->gfx.config.double_offchip_lds_buf;
                dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
                dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
                dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
                dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
                dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
                dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
                dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

                if (adev->family >= AMDGPU_FAMILY_NV)
                        dev_info->pa_sc_tile_steering_override =
                                adev->gfx.config.pa_sc_tile_steering_override;

                dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

                ret = copy_to_user(out, dev_info,
                                   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
                kfree(dev_info);
                return ret;
        }
        case AMDGPU_INFO_VCE_CLOCK_TABLE: {
                unsigned i;
                struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
                struct amd_vce_state *vce_state;

                for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
                        vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
                        if (vce_state) {
                                vce_clk_table.entries[i].sclk = vce_state->sclk;
                                vce_clk_table.entries[i].mclk = vce_state->mclk;
                                vce_clk_table.entries[i].eclk = vce_state->evclk;
                                vce_clk_table.num_valid_entries++;
                        }
                }

                return copy_to_user(out, &vce_clk_table,
                                    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_VBIOS: {
                uint32_t bios_size = adev->bios_size;

                switch (info->vbios_info.type) {
                case AMDGPU_INFO_VBIOS_SIZE:
                        return copy_to_user(out, &bios_size,
                                        min((size_t)size, sizeof(bios_size)))
                                        ? -EFAULT : 0;
                case AMDGPU_INFO_VBIOS_IMAGE: {
                        uint8_t *bios;
                        uint32_t bios_offset = info->vbios_info.offset;

                        if (bios_offset >= bios_size)
                                return -EINVAL;

                        bios = adev->bios + bios_offset;
                        return copy_to_user(out, bios,
                                            min((size_t)size, (size_t)(bios_size - bios_offset)))
                                        ? -EFAULT : 0;
                }
                case AMDGPU_INFO_VBIOS_INFO: {
                        struct drm_amdgpu_info_vbios vbios_info = {};
                        struct atom_context *atom_context;

                        atom_context = adev->mode_info.atom_context;
                        memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
                        memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
                        vbios_info.version = atom_context->version;
                        memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
                                                sizeof(atom_context->vbios_ver_str));
                        memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));

                        return copy_to_user(out, &vbios_info,
                                                min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
                }
                default:
                        DRM_DEBUG_KMS("Invalid request %d\n",
                                        info->vbios_info.type);
                        return -EINVAL;
                }
        }
        case AMDGPU_INFO_NUM_HANDLES: {
                struct drm_amdgpu_info_num_handles handle;

                switch (info->query_hw_ip.type) {
                case AMDGPU_HW_IP_UVD:
                        /* Starting with Polaris, we support unlimited UVD handles */
                        if (adev->asic_type < CHIP_POLARIS10) {
                                handle.uvd_max_handles = adev->uvd.max_handles;
                                handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

                                return copy_to_user(out, &handle,
                                        min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
                        } else {
                                return -ENODATA;
                        }

                        break;
                default:
                        return -EINVAL;
                }
        }
        case AMDGPU_INFO_SENSOR: {
                if (!adev->pm.dpm_enabled)
                        return -ENOENT;

                switch (info->sensor_info.type) {
                case AMDGPU_INFO_SENSOR_GFX_SCLK:
                        /* get sclk in MHz */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GFX_SCLK,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        ui32 /= 100;
                        break;
                case AMDGPU_INFO_SENSOR_GFX_MCLK:
                        /* get mclk in MHz */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GFX_MCLK,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        ui32 /= 100;
                        break;
                case AMDGPU_INFO_SENSOR_GPU_TEMP:
                        /* get temperature in millidegrees C */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GPU_TEMP,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        break;
                case AMDGPU_INFO_SENSOR_GPU_LOAD:
                        /* get GPU load */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GPU_LOAD,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        break;
                case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
                        /* get average GPU power */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_GPU_POWER,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        ui32 >>= 8;
                        break;
                case AMDGPU_INFO_SENSOR_VDDNB:
                        /* get VDDNB in millivolts */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_VDDNB,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        break;
                case AMDGPU_INFO_SENSOR_VDDGFX:
                        /* get VDDGFX in millivolts */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_VDDGFX,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        break;
                case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
                        /* get stable pstate sclk in MHz */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        ui32 /= 100;
                        break;
                case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
                        /* get stable pstate mclk in MHz */
                        if (amdgpu_dpm_read_sensor(adev,
                                                   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
                                                   (void *)&ui32, &ui32_size)) {
                                return -EINVAL;
                        }
                        ui32 /= 100;
                        break;
                default:
                        DRM_DEBUG_KMS("Invalid request %d\n",
                                      info->sensor_info.type);
                        return -EINVAL;
                }
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        }
        case AMDGPU_INFO_VRAM_LOST_COUNTER:
                ui32 = atomic_read(&adev->vram_lost_counter);
                return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
        case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
                struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
                uint64_t ras_mask;

                if (!ras)
                        return -EINVAL;
                ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;

                return copy_to_user(out, &ras_mask,
                                min_t(u64, size, sizeof(ras_mask))) ?
                        -EFAULT : 0;
        }
        case AMDGPU_INFO_VIDEO_CAPS: {
                const struct amdgpu_video_codecs *codecs;
                struct drm_amdgpu_info_video_caps *caps;
                int r;

                switch (info->video_cap.type) {
                case AMDGPU_INFO_VIDEO_CAPS_DECODE:
                        r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
                        if (r)
                                return -EINVAL;
                        break;
                case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
                        r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
                        if (r)
                                return -EINVAL;
                        break;
                default:
                        DRM_DEBUG_KMS("Invalid request %d\n",
                                      info->video_cap.type);
                        return -EINVAL;
                }

                caps = kzalloc(sizeof(*caps), GFP_KERNEL);
                if (!caps)
                        return -ENOMEM;

                for (i = 0; i < codecs->codec_count; i++) {
                        int idx = codecs->codec_array[i].codec_type;

                        switch (idx) {
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
                        case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
                                caps->codec_info[idx].valid = 1;
                                caps->codec_info[idx].max_width =
                                        codecs->codec_array[i].max_width;
                                caps->codec_info[idx].max_height =
                                        codecs->codec_array[i].max_height;
                                caps->codec_info[idx].max_pixels_per_frame =
                                        codecs->codec_array[i].max_pixels_per_frame;
                                caps->codec_info[idx].max_level =
                                        codecs->codec_array[i].max_level;
                                break;
                        default:
                                break;
                        }
                }
                r = copy_to_user(out, caps,
                                 min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
                kfree(caps);
                return r;
        }
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->query);
                return -EINVAL;
        }
        return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
        drm_fb_helper_lastclose(dev);
        vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv;
        int r, pasid;

        /* Ensure IB tests are run on ring */
        flush_delayed_work(&adev->delayed_init_work);


        if (amdgpu_ras_intr_triggered()) {
                DRM_ERROR("RAS Intr triggered, device disabled!!");
                return -EHWPOISON;
        }

        file_priv->driver_priv = NULL;

        r = pm_runtime_get_sync(dev->dev);
        if (r < 0)
                goto pm_put;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (unlikely(!fpriv)) {
                r = -ENOMEM;
                goto out_suspend;
        }

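        /* allocate a 16-bit PASID for this client; fall back to 0 (no PASID) */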
1150         pasid = amdgpu_pasid_alloc(16);
1151         if (pasid < 0) {
1152                 dev_warn(adev->dev, "No more PASIDs available!");
1153                 pasid = 0;
1154         }
1155
1156         r = amdgpu_vm_init(adev, &fpriv->vm);
1157         if (r)
1158                 goto error_pasid;
1159
1160         r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
1161         if (r)
1162                 goto error_vm;
1163
1164         fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
1165         if (!fpriv->prt_va) {
1166                 r = -ENOMEM;
1167                 goto error_vm;
1168         }
1169
1170         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1171                 uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
1172
1173                 r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
1174                                                 &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
1175                 if (r)
1176                         goto error_vm;
1177         }
1178
1179         mutex_init(&fpriv->bo_list_lock);
1180         idr_init_base(&fpriv->bo_list_handles, 1);
1181
1182         amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
1183
1184         file_priv->driver_priv = fpriv;
1185         goto out_suspend;
1186
1187 error_vm:
1188         amdgpu_vm_fini(adev, &fpriv->vm);
1189
1190 error_pasid:
1191         if (pasid) {
1192                 amdgpu_pasid_free(pasid);
1193                 amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
1194         }
1195
1196         kfree(fpriv);
1197
1198 out_suspend:
1199         pm_runtime_mark_last_busy(dev->dev);
1200 pm_put:
1201         pm_runtime_put_autosuspend(dev->dev);
1202
1203         return r;
1204 }
1205
1206 /**
1207  * amdgpu_driver_postclose_kms - drm callback for post close
1208  *
1209  * @dev: drm dev pointer
1210  * @file_priv: drm file
1211  *
1212  * On device post close, tear down the per-file VM and driver state (all asics).
1213  */
1214 void amdgpu_driver_postclose_kms(struct drm_device *dev,
1215                                  struct drm_file *file_priv)
1216 {
1217         struct amdgpu_device *adev = drm_to_adev(dev);
1218         struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
1219         struct amdgpu_bo_list *list;
1220         struct amdgpu_bo *pd;
1221         u32 pasid;
1222         int handle;
1223
1224         if (!fpriv)
1225                 return;
1226
1227         pm_runtime_get_sync(dev->dev);
1228
1229         if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
1230                 amdgpu_uvd_free_handles(adev, file_priv);
1231         if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
1232                 amdgpu_vce_free_handles(adev, file_priv);
1233
1234         if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
1235                 /* TODO: how to handle reserve failure */
1236                 BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
1237                 amdgpu_vm_bo_del(adev, fpriv->csa_va);
1238                 fpriv->csa_va = NULL;
1239                 amdgpu_bo_unreserve(adev->virt.csa_obj);
1240         }
1241
1242         pasid = fpriv->vm.pasid;
1243         pd = amdgpu_bo_ref(fpriv->vm.root.bo);
1244         if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
1245                 amdgpu_vm_bo_del(adev, fpriv->prt_va);
1246                 amdgpu_bo_unreserve(pd);
1247         }
1248
1249         amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
1250         amdgpu_vm_fini(adev, &fpriv->vm);
1251
1252         if (pasid)
1253                 amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
1254         amdgpu_bo_unref(&pd);
1255
1256         idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
1257                 amdgpu_bo_list_put(list);
1258
1259         idr_destroy(&fpriv->bo_list_handles);
1260         mutex_destroy(&fpriv->bo_list_lock);
1261
1262         kfree(fpriv);
1263         file_priv->driver_priv = NULL;
1264
1265         pm_runtime_mark_last_busy(dev->dev);
1266         pm_runtime_put_autosuspend(dev->dev);
1267 }
1268
1269
1270 void amdgpu_driver_release_kms(struct drm_device *dev)
1271 {
1272         struct amdgpu_device *adev = drm_to_adev(dev);
1273
1274         amdgpu_device_fini_sw(adev);
1275         pci_set_drvdata(adev->pdev, NULL);
1276 }
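These open/postclose/lastclose/release hooks are plugged into the DRM core through the driver's drm_driver table in amdgpu_drv.c. A heavily abbreviated sketch of that wiring (the field names are real drm_driver members, but the actual table carries many more entries; example_kms_driver is an illustrative name only):

static const struct drm_driver example_kms_driver = {
        /* feature flags, ioctl table, fops, dumb-buffer hooks, etc. omitted */
        .open      = amdgpu_driver_open_kms,
        .postclose = amdgpu_driver_postclose_kms,
        .lastclose = amdgpu_driver_lastclose_kms,
        .release   = amdgpu_driver_release_kms,
};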
1277
1278 /*
1279  * VBlank related functions.
1280  */
1281 /**
1282  * amdgpu_get_vblank_counter_kms - get frame count
1283  *
1284  * @crtc: crtc to get the frame count from
1285  *
1286  * Gets the frame count on the requested crtc (all asics).
1287  * Returns the frame count on success, or -EINVAL (as an unsigned value) if the crtc index is invalid.
1288  */
1289 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
1290 {
1291         struct drm_device *dev = crtc->dev;
1292         unsigned int pipe = crtc->index;
1293         struct amdgpu_device *adev = drm_to_adev(dev);
1294         int vpos, hpos, stat;
1295         u32 count;
1296
1297         if (pipe >= adev->mode_info.num_crtc) {
1298                 DRM_ERROR("Invalid crtc %u\n", pipe);
1299                 return -EINVAL;
1300         }
1301
1302         /* The hw increments its frame counter at start of vsync, not at start
1303          * of vblank, as is required by DRM core vblank counter handling.
1304          * Cook the hw count here to make it appear to the caller as if it
1305          * incremented at start of vblank. We measure distance to start of
1306          * vblank in vpos. vpos therefore will be >= 0 between start of vblank
1307          * and start of vsync, so vpos >= 0 means to bump the hw frame counter
1308          * result by 1 to give the proper appearance to caller.
1309          */
1310         if (adev->mode_info.crtcs[pipe]) {
1311                 /* Repeat readout if needed to provide stable result if
1312                  * we cross start of vsync during the queries.
1313                  */
1314                 do {
1315                         count = amdgpu_display_vblank_get_counter(adev, pipe);
1316                         /* Ask amdgpu_display_get_crtc_scanoutpos to return
1317                          * vpos as distance to start of vblank, instead of
1318                          * regular vertical scanout pos.
1319                          */
1320                         stat = amdgpu_display_get_crtc_scanoutpos(
1321                                 dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
1322                                 &vpos, &hpos, NULL, NULL,
1323                                 &adev->mode_info.crtcs[pipe]->base.hwmode);
1324                 } while (count != amdgpu_display_vblank_get_counter(adev, pipe));
1325
1326                 if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
1327                     (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
1328                         DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
1329                 } else {
1330                         DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
1331                                       pipe, vpos);
1332
1333                         /* Bump counter if we are at >= leading edge of vblank,
1334                          * but before vsync where vpos would turn negative and
1335                          * the hw counter really increments.
1336                          */
1337                         if (vpos >= 0)
1338                                 count++;
1339                 }
1340         } else {
1341                 /* Fallback to use value as is. */
1342                 count = amdgpu_display_vblank_get_counter(adev, pipe);
1343                 DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
1344         }
1345
1346         return count;
1347 }
1348
1349 /**
1350  * amdgpu_enable_vblank_kms - enable vblank interrupt
1351  *
1352  * @crtc: crtc to enable vblank interrupt for
1353  *
1354  * Enable the interrupt on the requested crtc (all asics).
1355  * Returns 0 on success, a negative error code on failure.
1356  */
1357 int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
1358 {
1359         struct drm_device *dev = crtc->dev;
1360         unsigned int pipe = crtc->index;
1361         struct amdgpu_device *adev = drm_to_adev(dev);
1362         int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1363
1364         return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
1365 }
1366
1367 /**
1368  * amdgpu_disable_vblank_kms - disable vblank interrupt
1369  *
1370  * @crtc: crtc to disable vblank interrupt for
1371  *
1372  * Disable the interrupt on the requested crtc (all asics).
1373  */
1374 void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
1375 {
1376         struct drm_device *dev = crtc->dev;
1377         unsigned int pipe = crtc->index;
1378         struct amdgpu_device *adev = drm_to_adev(dev);
1379         int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
1380
1381         amdgpu_irq_put(adev, &adev->crtc_irq, idx);
1382 }
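The vblank helpers above are exported to the DRM core through drm_crtc_funcs tables owned by the display code (the DCE and DM/DC paths), not from this file. A minimal sketch of that wiring, with a hypothetical table name:

static const struct drm_crtc_funcs example_crtc_funcs = {
        /* destroy/set_config/page_flip and the other callbacks omitted */
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank      = amdgpu_enable_vblank_kms,
        .disable_vblank     = amdgpu_disable_vblank_kms,
};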
1383
1384 /*
1385  * Debugfs info
1386  */
1387 #if defined(CONFIG_DEBUG_FS)
1388
1389 static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
1390 {
1391         struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
1392         struct drm_amdgpu_info_firmware fw_info;
1393         struct drm_amdgpu_query_fw query_fw;
1394         struct atom_context *ctx = adev->mode_info.atom_context;
1395         uint8_t smu_program, smu_major, smu_minor, smu_debug;
1396         int ret, i;
1397
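        /* Printable names for the PSP TA firmware types, generated with a small
         * helper macro over designated initializers so the strings stay keyed
         * to the TA_FW_TYPE_PSP_* enum values.
         */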
1398         static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
1399 #define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
1400                 TA_FW_NAME(XGMI),
1401                 TA_FW_NAME(RAS),
1402                 TA_FW_NAME(HDCP),
1403                 TA_FW_NAME(DTM),
1404                 TA_FW_NAME(RAP),
1405                 TA_FW_NAME(SECUREDISPLAY),
1406 #undef TA_FW_NAME
1407         };
1408
1409         /* VCE */
1410         query_fw.fw_type = AMDGPU_INFO_FW_VCE;
1411         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1412         if (ret)
1413                 return ret;
1414         seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
1415                    fw_info.feature, fw_info.ver);
1416
1417         /* UVD */
1418         query_fw.fw_type = AMDGPU_INFO_FW_UVD;
1419         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1420         if (ret)
1421                 return ret;
1422         seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
1423                    fw_info.feature, fw_info.ver);
1424
1425         /* GMC */
1426         query_fw.fw_type = AMDGPU_INFO_FW_GMC;
1427         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1428         if (ret)
1429                 return ret;
1430         seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
1431                    fw_info.feature, fw_info.ver);
1432
1433         /* ME */
1434         query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
1435         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1436         if (ret)
1437                 return ret;
1438         seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
1439                    fw_info.feature, fw_info.ver);
1440
1441         /* PFP */
1442         query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
1443         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1444         if (ret)
1445                 return ret;
1446         seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
1447                    fw_info.feature, fw_info.ver);
1448
1449         /* CE */
1450         query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
1451         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1452         if (ret)
1453                 return ret;
1454         seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
1455                    fw_info.feature, fw_info.ver);
1456
1457         /* RLC */
1458         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
1459         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1460         if (ret)
1461                 return ret;
1462         seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
1463                    fw_info.feature, fw_info.ver);
1464
1465         /* RLC SAVE RESTORE LIST CNTL */
1466         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
1467         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1468         if (ret)
1469                 return ret;
1470         seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
1471                    fw_info.feature, fw_info.ver);
1472
1473         /* RLC SAVE RESTORE LIST GPM MEM */
1474         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
1475         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1476         if (ret)
1477                 return ret;
1478         seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
1479                    fw_info.feature, fw_info.ver);
1480
1481         /* RLC SAVE RESTORE LIST SRM MEM */
1482         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
1483         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1484         if (ret)
1485                 return ret;
1486         seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
1487                    fw_info.feature, fw_info.ver);
1488
1489         /* RLCP */
1490         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
1491         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1492         if (ret)
1493                 return ret;
1494         seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
1495                    fw_info.feature, fw_info.ver);
1496
1497         /* RLCV */
1498         query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
1499         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1500         if (ret)
1501                 return ret;
1502         seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
1503                    fw_info.feature, fw_info.ver);
1504
1505         /* MEC */
1506         query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
1507         query_fw.index = 0;
1508         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1509         if (ret)
1510                 return ret;
1511         seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
1512                    fw_info.feature, fw_info.ver);
1513
1514         /* MEC2 */
1515         if (adev->gfx.mec2_fw) {
1516                 query_fw.index = 1;
1517                 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1518                 if (ret)
1519                         return ret;
1520                 seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
1521                            fw_info.feature, fw_info.ver);
1522         }
1523
1524         /* PSP SOS */
1525         query_fw.fw_type = AMDGPU_INFO_FW_SOS;
1526         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1527         if (ret)
1528                 return ret;
1529         seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
1530                    fw_info.feature, fw_info.ver);
1531
1532
1533         /* PSP ASD */
1534         query_fw.fw_type = AMDGPU_INFO_FW_ASD;
1535         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1536         if (ret)
1537                 return ret;
1538         seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
1539                    fw_info.feature, fw_info.ver);
1540
1541         query_fw.fw_type = AMDGPU_INFO_FW_TA;
1542         for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
1543                 query_fw.index = i;
1544                 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1545                 if (ret)
1546                         continue;
1547
1548                 seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
1549                            ta_fw_name[i], fw_info.feature, fw_info.ver);
1550         }
1551
1552         /* SMC */
1553         query_fw.fw_type = AMDGPU_INFO_FW_SMC;
1554         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1555         if (ret)
1556                 return ret;
1557         smu_program = (fw_info.ver >> 24) & 0xff;
1558         smu_major = (fw_info.ver >> 16) & 0xff;
1559         smu_minor = (fw_info.ver >> 8) & 0xff;
1560         smu_debug = (fw_info.ver >> 0) & 0xff;
1561         seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
1562                    fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
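        /* The SMC version packs program/major/minor/debug into one byte each;
         * e.g. a purely hypothetical fw_info.ver of 0x01503600 would print as
         * program 1, version 80.54.0.
         */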
1563
1564         /* SDMA */
1565         query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
1566         for (i = 0; i < adev->sdma.num_instances; i++) {
1567                 query_fw.index = i;
1568                 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1569                 if (ret)
1570                         return ret;
1571                 seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
1572                            i, fw_info.feature, fw_info.ver);
1573         }
1574
1575         /* VCN */
1576         query_fw.fw_type = AMDGPU_INFO_FW_VCN;
1577         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1578         if (ret)
1579                 return ret;
1580         seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
1581                    fw_info.feature, fw_info.ver);
1582
1583         /* DMCU */
1584         query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
1585         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1586         if (ret)
1587                 return ret;
1588         seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
1589                    fw_info.feature, fw_info.ver);
1590
1591         /* DMCUB */
1592         query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
1593         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1594         if (ret)
1595                 return ret;
1596         seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
1597                    fw_info.feature, fw_info.ver);
1598
1599         /* TOC */
1600         query_fw.fw_type = AMDGPU_INFO_FW_TOC;
1601         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1602         if (ret)
1603                 return ret;
1604         seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
1605                    fw_info.feature, fw_info.ver);
1606
1607         /* CAP */
1608         if (adev->psp.cap_fw) {
1609                 query_fw.fw_type = AMDGPU_INFO_FW_CAP;
1610                 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1611                 if (ret)
1612                         return ret;
1613                 seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
1614                                 fw_info.feature, fw_info.ver);
1615         }
1616
1617         /* MES_KIQ */
1618         query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
1619         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1620         if (ret)
1621                 return ret;
1622         seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
1623                    fw_info.feature, fw_info.ver);
1624
1625         /* MES */
1626         query_fw.fw_type = AMDGPU_INFO_FW_MES;
1627         ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
1628         if (ret)
1629                 return ret;
1630         seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
1631                    fw_info.feature, fw_info.ver);
1632
1633         seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);
1634
1635         return 0;
1636 }
1637
1638 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);
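DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> is what produces the amdgpu_debugfs_firmware_info_fops used below; roughly, it expands to a single_open() wrapper plus a seq_file-backed file_operations along these lines (a sketch only, the macro definition in seq_file.h is authoritative):

static int amdgpu_debugfs_firmware_info_open(struct inode *inode,
                                             struct file *file)
{
        return single_open(file, amdgpu_debugfs_firmware_info_show,
                           inode->i_private);
}

static const struct file_operations amdgpu_debugfs_firmware_info_fops = {
        .owner   = THIS_MODULE,
        .open    = amdgpu_debugfs_firmware_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};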
1639
1640 #endif
1641
1642 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
1643 {
1644 #if defined(CONFIG_DEBUG_FS)
1645         struct drm_minor *minor = adev_to_drm(adev)->primary;
1646         struct dentry *root = minor->debugfs_root;
1647
1648         debugfs_create_file("amdgpu_firmware_info", 0444, root,
1649                             adev, &amdgpu_debugfs_firmware_info_fops);
1650
1651 #endif
1652 }
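With debugfs mounted, the resulting read-only file typically appears as /sys/kernel/debug/dri/<minor>/amdgpu_firmware_info and can simply be read (usually as root) to dump the firmware versions printed by the show function above.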