/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/list.h>
25 #include <linux/slab.h>
26 #include <linux/pci.h>
28 #include <linux/firmware.h>
29 #include <drm/amdgpu_drm.h>
31 #include "cgs_linux.h"
33 #include "amdgpu_ucode.h"
35 struct amdgpu_cgs_device {
36 struct cgs_device base;
37 struct amdgpu_device *adev;
/* Declare a local 'adev' resolved from the opaque cgs_device handle. */
#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
45 static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
48 return RREG32(offset);
51 static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
55 WREG32(offset, value);
58 static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
59 enum cgs_ind_reg space,
64 case CGS_IND_REG__MMIO:
65 return RREG32_IDX(index);
66 case CGS_IND_REG__PCIE:
67 return RREG32_PCIE(index);
68 case CGS_IND_REG__SMC:
69 return RREG32_SMC(index);
70 case CGS_IND_REG__UVD_CTX:
71 return RREG32_UVD_CTX(index);
72 case CGS_IND_REG__DIDT:
73 return RREG32_DIDT(index);
74 case CGS_IND_REG_GC_CAC:
75 return RREG32_GC_CAC(index);
76 case CGS_IND_REG_SE_CAC:
77 return RREG32_SE_CAC(index);
78 case CGS_IND_REG__AUDIO_ENDPT:
79 DRM_ERROR("audio endpt register access not implemented.\n");
82 WARN(1, "Invalid indirect register space");
86 static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
87 enum cgs_ind_reg space,
88 unsigned index, uint32_t value)
92 case CGS_IND_REG__MMIO:
93 return WREG32_IDX(index, value);
94 case CGS_IND_REG__PCIE:
95 return WREG32_PCIE(index, value);
96 case CGS_IND_REG__SMC:
97 return WREG32_SMC(index, value);
98 case CGS_IND_REG__UVD_CTX:
99 return WREG32_UVD_CTX(index, value);
100 case CGS_IND_REG__DIDT:
101 return WREG32_DIDT(index, value);
102 case CGS_IND_REG_GC_CAC:
103 return WREG32_GC_CAC(index, value);
104 case CGS_IND_REG_SE_CAC:
105 return WREG32_SE_CAC(index, value);
106 case CGS_IND_REG__AUDIO_ENDPT:
107 DRM_ERROR("audio endpt register access not implemented.\n");
110 WARN(1, "Invalid indirect register space");
113 static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
114 enum cgs_resource_type resource_type,
117 uint64_t *resource_base)
121 if (resource_base == NULL)
124 switch (resource_type) {
125 case CGS_RESOURCE_TYPE_MMIO:
126 if (adev->rmmio_size == 0)
128 if ((offset + size) > adev->rmmio_size)
130 *resource_base = adev->rmmio_base;
132 case CGS_RESOURCE_TYPE_DOORBELL:
133 if (adev->doorbell.size == 0)
135 if ((offset + size) > adev->doorbell.size)
137 *resource_base = adev->doorbell.base;
139 case CGS_RESOURCE_TYPE_FB:
140 case CGS_RESOURCE_TYPE_IO:
141 case CGS_RESOURCE_TYPE_ROM:
147 static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
148 unsigned table, uint16_t *size,
149 uint8_t *frev, uint8_t *crev)
154 if (amdgpu_atom_parse_data_header(
155 adev->mode_info.atom_context, table, size,
156 frev, crev, &data_start))
157 return (uint8_t*)adev->mode_info.atom_context->bios +
163 static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
164 uint8_t *frev, uint8_t *crev)
168 if (amdgpu_atom_parse_cmd_header(
169 adev->mode_info.atom_context, table,
176 static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
181 return amdgpu_atom_execute_table(
182 adev->mode_info.atom_context, table, args);
185 struct cgs_irq_params {
187 cgs_irq_source_set_func_t set;
188 cgs_irq_handler_func_t handler;
192 static int cgs_set_irq_state(struct amdgpu_device *adev,
193 struct amdgpu_irq_src *src,
195 enum amdgpu_interrupt_state state)
197 struct cgs_irq_params *irq_params =
198 (struct cgs_irq_params *)src->data;
201 if (!irq_params->set)
203 return irq_params->set(irq_params->private_data,
209 static int cgs_process_irq(struct amdgpu_device *adev,
210 struct amdgpu_irq_src *source,
211 struct amdgpu_iv_entry *entry)
213 struct cgs_irq_params *irq_params =
214 (struct cgs_irq_params *)source->data;
217 if (!irq_params->handler)
219 return irq_params->handler(irq_params->private_data,
224 static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
225 .set = cgs_set_irq_state,
226 .process = cgs_process_irq,
229 static int amdgpu_cgs_add_irq_source(void *cgs_device,
233 cgs_irq_source_set_func_t set,
234 cgs_irq_handler_func_t handler,
239 struct cgs_irq_params *irq_params;
240 struct amdgpu_irq_src *source =
241 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
245 kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
250 source->num_types = num_types;
251 source->funcs = &cgs_irq_funcs;
252 irq_params->src_id = src_id;
253 irq_params->set = set;
254 irq_params->handler = handler;
255 irq_params->private_data = private_data;
256 source->data = (void *)irq_params;
257 ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
266 static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
267 unsigned src_id, unsigned type)
271 if (!adev->irq.client[client_id].sources)
274 return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
277 static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
278 unsigned src_id, unsigned type)
282 if (!adev->irq.client[client_id].sources)
285 return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
288 static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
289 enum amd_ip_block_type block_type,
290 enum amd_clockgating_state state)
295 for (i = 0; i < adev->num_ip_blocks; i++) {
296 if (!adev->ip_blocks[i].status.valid)
299 if (adev->ip_blocks[i].version->type == block_type) {
300 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
309 static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
310 enum amd_ip_block_type block_type,
311 enum amd_powergating_state state)
316 for (i = 0; i < adev->num_ip_blocks; i++) {
317 if (!adev->ip_blocks[i].status.valid)
320 if (adev->ip_blocks[i].version->type == block_type) {
321 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
331 static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
334 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
337 case CGS_UCODE_ID_SDMA0:
338 result = AMDGPU_UCODE_ID_SDMA0;
340 case CGS_UCODE_ID_SDMA1:
341 result = AMDGPU_UCODE_ID_SDMA1;
343 case CGS_UCODE_ID_CP_CE:
344 result = AMDGPU_UCODE_ID_CP_CE;
346 case CGS_UCODE_ID_CP_PFP:
347 result = AMDGPU_UCODE_ID_CP_PFP;
349 case CGS_UCODE_ID_CP_ME:
350 result = AMDGPU_UCODE_ID_CP_ME;
352 case CGS_UCODE_ID_CP_MEC:
353 case CGS_UCODE_ID_CP_MEC_JT1:
354 result = AMDGPU_UCODE_ID_CP_MEC1;
356 case CGS_UCODE_ID_CP_MEC_JT2:
357 /* for VI. JT2 should be the same as JT1, because:
358 1, MEC2 and MEC1 use exactly same FW.
359 2, JT2 is not pached but JT1 is.
361 if (adev->asic_type >= CHIP_TOPAZ)
362 result = AMDGPU_UCODE_ID_CP_MEC1;
364 result = AMDGPU_UCODE_ID_CP_MEC2;
366 case CGS_UCODE_ID_RLC_G:
367 result = AMDGPU_UCODE_ID_RLC_G;
369 case CGS_UCODE_ID_STORAGE:
370 result = AMDGPU_UCODE_ID_STORAGE;
373 DRM_ERROR("Firmware type not supported\n");
378 static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
381 if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
382 release_firmware(adev->pm.fw);
386 /* cannot release other firmware because they are not created by cgs */
390 static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
391 enum cgs_ucode_id type)
394 uint16_t fw_version = 0;
397 case CGS_UCODE_ID_SDMA0:
398 fw_version = adev->sdma.instance[0].fw_version;
400 case CGS_UCODE_ID_SDMA1:
401 fw_version = adev->sdma.instance[1].fw_version;
403 case CGS_UCODE_ID_CP_CE:
404 fw_version = adev->gfx.ce_fw_version;
406 case CGS_UCODE_ID_CP_PFP:
407 fw_version = adev->gfx.pfp_fw_version;
409 case CGS_UCODE_ID_CP_ME:
410 fw_version = adev->gfx.me_fw_version;
412 case CGS_UCODE_ID_CP_MEC:
413 fw_version = adev->gfx.mec_fw_version;
415 case CGS_UCODE_ID_CP_MEC_JT1:
416 fw_version = adev->gfx.mec_fw_version;
418 case CGS_UCODE_ID_CP_MEC_JT2:
419 fw_version = adev->gfx.mec_fw_version;
421 case CGS_UCODE_ID_RLC_G:
422 fw_version = adev->gfx.rlc_fw_version;
424 case CGS_UCODE_ID_STORAGE:
427 DRM_ERROR("firmware type %d do not have version\n", type);
433 static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
438 if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
439 adev->gfx.rlc.funcs->exit_safe_mode == NULL)
443 adev->gfx.rlc.funcs->enter_safe_mode(adev);
445 adev->gfx.rlc.funcs->exit_safe_mode(adev);
450 static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device,
456 mutex_lock(&adev->grbm_idx_mutex);
458 mutex_unlock(&adev->grbm_idx_mutex);
/*
 * amdgpu_cgs_get_firmware_info() - fill a cgs_firmware_info for @type.
 *
 * Non-SMU types: look up the already-loaded ucode in adev->firmware and
 * report its GPU address, kernel pointer, sizes and versions.
 * SMU types: pick the SMC firmware file name from the ASIC type (and, for
 * some boards, PCI device/revision IDs that mark "kicker" board variants),
 * request and validate it, then report its load parameters.
 *
 * NOTE(review): this chunk was extracted with original line numbers fused
 * onto each line and with some lines missing (braces, returns, breaks);
 * the comments below annotate only the logic that is visible here.
 */
461 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
462 enum cgs_ucode_id type,
463 struct cgs_firmware_info *info)
/* Branch 1: every firmware type except the SMC/SMU images. */
467 if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
470 const struct gfx_firmware_header_v1_0 *header;
471 enum AMDGPU_UCODE_ID id;
472 struct amdgpu_firmware_info *ucode;
/* Map the CGS id onto the driver's ucode table and find the image. */
474 id = fw_type_convert(cgs_device, type);
475 ucode = &adev->firmware.ucode[id];
476 if (ucode->fw == NULL)
479 gpu_addr = ucode->mc_addr;
480 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
481 data_size = le32_to_cpu(header->header.ucode_size_bytes);
/* MEC jump tables sit after the page-aligned ucode body; jt_size is in dwords. */
483 if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
484 (type == CGS_UCODE_ID_CP_MEC_JT2)) {
485 gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
486 data_size = le32_to_cpu(header->jt_size) << 2;
489 info->kptr = ucode->kaddr;
490 info->image_size = data_size;
491 info->mc_addr = gpu_addr;
492 info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
/* For CP_MEC only the region up to the jump-table offset is the image. */
494 if (CGS_UCODE_ID_CP_MEC == type)
495 info->image_size = le32_to_cpu(header->jt_offset) << 2;
497 info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
498 info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
/* Branch 2: SMU firmware - choose a file per ASIC/PCI ID, then load it. */
500 char fw_name[30] = {0};
503 uint32_t ucode_start_address;
505 const struct smc_firmware_header_v1_0 *hdr;
506 const struct common_firmware_header *header;
507 struct amdgpu_firmware_info *ucode = NULL;
510 switch (adev->asic_type) {
512 strcpy(fw_name, "radeon/tahiti_smc.bin");
/* Pitcairn: rev 0x81 on 0x6810/0x6811 ships the "kicker" SMC build. */
515 if ((adev->pdev->revision == 0x81) &&
516 ((adev->pdev->device == 0x6810) ||
517 (adev->pdev->device == 0x6811))) {
518 info->is_kicker = true;
519 strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
521 strcpy(fw_name, "radeon/pitcairn_smc.bin");
/* Verde kicker board matrix (device/revision pairs). */
525 if (((adev->pdev->device == 0x6820) &&
526 ((adev->pdev->revision == 0x81) ||
527 (adev->pdev->revision == 0x83))) ||
528 ((adev->pdev->device == 0x6821) &&
529 ((adev->pdev->revision == 0x83) ||
530 (adev->pdev->revision == 0x87))) ||
531 ((adev->pdev->revision == 0x87) &&
532 ((adev->pdev->device == 0x6823) ||
533 (adev->pdev->device == 0x682b)))) {
534 info->is_kicker = true;
535 strcpy(fw_name, "radeon/verde_k_smc.bin");
537 strcpy(fw_name, "radeon/verde_smc.bin");
/* Oland kicker board matrix. */
541 if (((adev->pdev->revision == 0x81) &&
542 ((adev->pdev->device == 0x6600) ||
543 (adev->pdev->device == 0x6604) ||
544 (adev->pdev->device == 0x6605) ||
545 (adev->pdev->device == 0x6610))) ||
546 ((adev->pdev->revision == 0x83) &&
547 (adev->pdev->device == 0x6610))) {
548 info->is_kicker = true;
549 strcpy(fw_name, "radeon/oland_k_smc.bin");
551 strcpy(fw_name, "radeon/oland_smc.bin");
/* Hainan kicker/banks board matrix. */
555 if (((adev->pdev->revision == 0x81) &&
556 (adev->pdev->device == 0x6660)) ||
557 ((adev->pdev->revision == 0x83) &&
558 ((adev->pdev->device == 0x6660) ||
559 (adev->pdev->device == 0x6663) ||
560 (adev->pdev->device == 0x6665) ||
561 (adev->pdev->device == 0x6667)))) {
562 info->is_kicker = true;
563 strcpy(fw_name, "radeon/hainan_k_smc.bin");
564 } else if ((adev->pdev->revision == 0xc3) &&
565 (adev->pdev->device == 0x6665)) {
566 info->is_kicker = true;
567 strcpy(fw_name, "radeon/banks_k_2_smc.bin");
569 strcpy(fw_name, "radeon/hainan_smc.bin");
/* Bonaire kicker boards. */
573 if ((adev->pdev->revision == 0x80) ||
574 (adev->pdev->revision == 0x81) ||
575 (adev->pdev->device == 0x665f)) {
576 info->is_kicker = true;
577 strcpy(fw_name, "radeon/bonaire_k_smc.bin");
579 strcpy(fw_name, "radeon/bonaire_smc.bin");
/* Hawaii kicker boards. */
583 if (adev->pdev->revision == 0x80) {
584 info->is_kicker = true;
585 strcpy(fw_name, "radeon/hawaii_k_smc.bin");
587 strcpy(fw_name, "radeon/hawaii_smc.bin");
/* Topaz kicker boards. */
591 if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
592 ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
593 ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
594 info->is_kicker = true;
595 strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
597 strcpy(fw_name, "amdgpu/topaz_smc.bin");
/* Tonga kicker boards. */
600 if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
601 ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
602 info->is_kicker = true;
603 strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
605 strcpy(fw_name, "amdgpu/tonga_smc.bin");
608 strcpy(fw_name, "amdgpu/fiji_smc.bin");
/* Polaris11: SMU vs SMU_SK ("secure kicker") selects a different file. */
611 if (type == CGS_UCODE_ID_SMU) {
612 if (((adev->pdev->device == 0x67ef) &&
613 ((adev->pdev->revision == 0xe0) ||
614 (adev->pdev->revision == 0xe2) ||
615 (adev->pdev->revision == 0xe5))) ||
616 ((adev->pdev->device == 0x67ff) &&
617 ((adev->pdev->revision == 0xcf) ||
618 (adev->pdev->revision == 0xef) ||
619 (adev->pdev->revision == 0xff)))) {
620 info->is_kicker = true;
621 strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
623 strcpy(fw_name, "amdgpu/polaris11_smc.bin");
624 } else if (type == CGS_UCODE_ID_SMU_SK) {
625 strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
/* Polaris10: same SMU / SMU_SK split. */
629 if (type == CGS_UCODE_ID_SMU) {
630 if ((adev->pdev->device == 0x67df) &&
631 ((adev->pdev->revision == 0xe0) ||
632 (adev->pdev->revision == 0xe3) ||
633 (adev->pdev->revision == 0xe4) ||
634 (adev->pdev->revision == 0xe5) ||
635 (adev->pdev->revision == 0xe7) ||
636 (adev->pdev->revision == 0xef))) {
637 info->is_kicker = true;
638 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
640 strcpy(fw_name, "amdgpu/polaris10_smc.bin");
641 } else if (type == CGS_UCODE_ID_SMU_SK) {
642 strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
646 strcpy(fw_name, "amdgpu/polaris12_smc.bin");
/* Vega10: certain 0x687f revisions take the ACG SMC build. */
649 if ((adev->pdev->device == 0x687f) &&
650 ((adev->pdev->revision == 0xc0) ||
651 (adev->pdev->revision == 0xc1) ||
652 (adev->pdev->revision == 0xc3)))
653 strcpy(fw_name, "amdgpu/vega10_acg_smc.bin");
655 strcpy(fw_name, "amdgpu/vega10_smc.bin");
658 DRM_ERROR("SMC firmware not supported\n");
/* Fetch and validate the chosen SMC image from userspace firmware. */
662 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
664 DRM_ERROR("Failed to request firmware\n");
668 err = amdgpu_ucode_validate(adev->pm.fw);
670 DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
671 release_firmware(adev->pm.fw);
/* When the PSP loads firmware, account the SMC image in fw_size too. */
676 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
677 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
678 ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
679 ucode->fw = adev->pm.fw;
680 header = (const struct common_firmware_header *)ucode->fw->data;
681 adev->firmware.fw_size +=
682 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
/* Parse the SMC header and report version/size/start address to CGS. */
686 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
687 amdgpu_ucode_print_smc_hdr(&hdr->header);
688 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
689 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
690 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
691 src = (const uint8_t *)(adev->pm.fw->data +
692 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
694 info->version = adev->pm.fw_version;
695 info->image_size = ucode_size;
696 info->ucode_start_address = ucode_start_address;
697 info->kptr = (void *)src;
702 static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
705 return amdgpu_sriov_vf(adev);
/*
 * amdgpu_cgs_get_active_displays_info() - report active display state to CGS.
 *
 * Fills @info with the active display mask/count and, when
 * info->mode_info is provided, the per-mode vblank time, refresh rate
 * and SPLL reference clock used by DPM for mclk switching decisions.
 * Non-DC path walks the CRTC list directly; DC path reads the cached
 * pm_display_cfg instead.
 *
 * NOTE(review): this chunk was extracted with original line numbers fused
 * onto each line and with some lines missing (braces/returns); comments
 * annotate the visible logic only.
 */
708 static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
709 struct cgs_display_info *info)
712 struct cgs_mode_info *mode_info;
717 mode_info = info->mode_info;
719 /* if the displays are off, vblank time is max */
720 mode_info->vblank_time_us = 0xffffffff;
721 /* always set the reference clock */
722 mode_info->ref_clock = adev->clock.spll.reference_freq;
/* Non-DC (legacy display) path: derive timings from each enabled CRTC. */
725 if (!amdgpu_device_has_dc_support(adev)) {
726 struct amdgpu_crtc *amdgpu_crtc;
727 struct drm_device *ddev = adev->ddev;
728 struct drm_crtc *crtc;
729 uint32_t line_time_us, vblank_lines;
731 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
732 list_for_each_entry(crtc,
733 &ddev->mode_config.crtc_list, head) {
734 amdgpu_crtc = to_amdgpu_crtc(crtc);
736 info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
737 info->display_count++;
/* Only CRTCs with a live hardware mode contribute vblank timing. */
739 if (mode_info != NULL &&
740 crtc->enabled && amdgpu_crtc->enabled &&
741 amdgpu_crtc->hw_mode.clock) {
/* line time in us = htotal / pixel clock (clock is in kHz). */
742 line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
743 amdgpu_crtc->hw_mode.clock;
744 vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
745 amdgpu_crtc->hw_mode.crtc_vdisplay +
746 (amdgpu_crtc->v_border * 2);
747 mode_info->vblank_time_us = vblank_lines * line_time_us;
748 mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
749 /* we have issues with mclk switching with refresh rates
750 * over 120 hz on the non-DC code.
752 if (mode_info->refresh_rate > 120)
753 mode_info->vblank_time_us = 0;
/* DC path: use the display config cached by the DM layer. */
759 info->display_count = adev->pm.pm_display_cfg.num_display;
760 if (mode_info != NULL) {
761 mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
762 mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
769 static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
773 adev->pm.dpm_enabled = enabled;
778 static const struct cgs_ops amdgpu_cgs_ops = {
779 .read_register = amdgpu_cgs_read_register,
780 .write_register = amdgpu_cgs_write_register,
781 .read_ind_register = amdgpu_cgs_read_ind_register,
782 .write_ind_register = amdgpu_cgs_write_ind_register,
783 .get_pci_resource = amdgpu_cgs_get_pci_resource,
784 .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
785 .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
786 .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
787 .get_firmware_info = amdgpu_cgs_get_firmware_info,
788 .rel_firmware = amdgpu_cgs_rel_firmware,
789 .set_powergating_state = amdgpu_cgs_set_powergating_state,
790 .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
791 .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
792 .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
793 .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
794 .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
795 .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
798 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
799 .add_irq_source = amdgpu_cgs_add_irq_source,
800 .irq_get = amdgpu_cgs_irq_get,
801 .irq_put = amdgpu_cgs_irq_put
804 struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
806 struct amdgpu_cgs_device *cgs_device =
807 kmalloc(sizeof(*cgs_device), GFP_KERNEL);
810 DRM_ERROR("Couldn't allocate CGS device structure\n");
814 cgs_device->base.ops = &amdgpu_cgs_ops;
815 cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
816 cgs_device->adev = adev;
818 return (struct cgs_device *)cgs_device;
821 void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)