2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/module.h>
27 #include <asm/hypervisor.h>
30 #include <drm/drm_drv.h>
34 #include "amdgpu_ras.h"
35 #include "amdgpu_reset.h"
40 #define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
42 vf2pf_info->ucode_info[ucode].id = ucode; \
43 vf2pf_info->ucode_info[ucode].version = ver; \
46 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
48 /* By now all MMIO pages except mailbox are blocked */
49 /* if blocking is enabled in hypervisor. Choose the */
50 /* SCRATCH_REG0 to test. */
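	/* A blocked page reads back as all ones, hence the 0xffffffff test. */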
51 return RREG32_NO_KIQ(0xc040) == 0xffffffff;
54 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
56 struct drm_device *ddev = adev_to_drm(adev);
58 /* enable virtual display */
59 if (adev->asic_type != CHIP_ALDEBARAN &&
60 adev->asic_type != CHIP_ARCTURUS &&
61 ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
62 if (adev->mode_info.num_crtc == 0)
63 adev->mode_info.num_crtc = 1;
64 adev->enable_virtual_display = true;
66 ddev->driver_features &= ~DRIVER_ATOMIC;
70 /* Reduce kcq number to 2 to reduce latency */
71 if (amdgpu_num_kcq == -1)
76 * amdgpu_virt_request_full_gpu() - request full gpu access
77 * @adev: amdgpu device.
78 * @init: whether this is called at driver init time.
79 * When starting driver init/fini, full GPU access must be requested first.
80 * Return: Zero on success, error code otherwise.
82 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
84 struct amdgpu_virt *virt = &adev->virt;
87 if (virt->ops && virt->ops->req_full_gpu) {
88 r = virt->ops->req_full_gpu(adev, init);
92 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
99 * amdgpu_virt_release_full_gpu() - release full gpu access
100 * @adev: amdgpu device.
101 * @init: whether this is called at driver init time.
102 * When driver init/fini finishes, full GPU access must be released.
103 * Return: Zero on success, error code otherwise.
105 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
107 struct amdgpu_virt *virt = &adev->virt;
110 if (virt->ops && virt->ops->rel_full_gpu) {
111 r = virt->ops->rel_full_gpu(adev, init);
115 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
121 * amdgpu_virt_reset_gpu() - reset gpu
122 * @adev: amdgpu device.
123 * Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
124 * Return: Zero on success, error code otherwise.
126 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
128 struct amdgpu_virt *virt = &adev->virt;
131 if (virt->ops && virt->ops->reset_gpu) {
132 r = virt->ops->reset_gpu(adev);
136 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
142 void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
144 struct amdgpu_virt *virt = &adev->virt;
146 if (virt->ops && virt->ops->req_init_data)
147 virt->ops->req_init_data(adev);
149 if (adev->virt.req_init_data_ver > 0)
150 DRM_INFO("host supports REQ_INIT_DATA handshake\n");
152 DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
156 * amdgpu_virt_ready_to_reset() - send ready to reset to host
157 * @adev: amdgpu device.
158 * Send a ready-to-reset message to the GPU hypervisor to signal that we have
159 * stopped GPU activity and are ready for the host FLR.
161 void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev)
163 struct amdgpu_virt *virt = &adev->virt;
165 if (virt->ops && virt->ops->ready_to_reset)
166 virt->ops->ready_to_reset(adev);
170 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
171 * @adev: amdgpu device.
172 * Wait for the GPU reset to complete.
173 * Return: Zero on success, error code otherwise.
175 int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
177 struct amdgpu_virt *virt = &adev->virt;
179 if (!virt->ops || !virt->ops->wait_reset)
182 return virt->ops->wait_reset(adev);
186 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
187 * @adev: amdgpu device.
188 * The MM table is used by UVD and VCE for their initialization.
189 * Return: Zero on successful allocation, error code otherwise.
191 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
195 if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
198 r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
199 AMDGPU_GEM_DOMAIN_VRAM |
200 AMDGPU_GEM_DOMAIN_GTT,
201 &adev->virt.mm_table.bo,
202 &adev->virt.mm_table.gpu_addr,
203 (void *)&adev->virt.mm_table.cpu_addr);
205 DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
209 memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
210 DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
211 adev->virt.mm_table.gpu_addr,
212 adev->virt.mm_table.cpu_addr);
217 * amdgpu_virt_free_mm_table() - free mm table memory
218 * @adev: amdgpu device.
219 * Free MM table memory
221 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
223 if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
226 amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
227 &adev->virt.mm_table.gpu_addr,
228 (void *)&adev->virt.mm_table.cpu_addr);
229 adev->virt.mm_table.gpu_addr = 0;
233 * amdgpu_virt_rcvd_ras_interrupt() - receive ras interrupt
234 * @adev: amdgpu device.
235 * Check whether the host has sent a RAS error message.
236 * Return: true if one was received, otherwise false
238 bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev)
240 struct amdgpu_virt *virt = &adev->virt;
242 if (!virt->ops || !virt->ops->rcvd_ras_intr)
245 return virt->ops->rcvd_ras_intr(adev);
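/* amd_sriov_msg_checksum() - byte-wise sum of @obj seeded with @key.
 * The bytes of the @checksum field itself are subtracted again, so the
 * result can be compared directly against a message that already carries
 * its checksum.
 */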
249 unsigned int amd_sriov_msg_checksum(void *obj,
250 unsigned long obj_size,
252 unsigned int checksum)
254 unsigned int ret = key;
259 /* calculate checksum */
260 for (i = 0; i < obj_size; ++i)
262 /* minus the checksum itself */
263 pos = (char *)&checksum;
264 for (i = 0; i < sizeof(checksum); ++i)
269 static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
271 struct amdgpu_virt *virt = &adev->virt;
272 struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
273 /* GPU will be marked bad on the host if the bp count exceeds 10,
274 * so allocating 512 entries is enough.
276 unsigned int align_space = 512;
278 struct amdgpu_bo **bps_bo = NULL;
280 *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
284 bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
288 bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
293 (*data)->bps_bo = bps_bo;
295 (*data)->last_reserved = 0;
297 virt->ras_init_done = true;
309 static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
311 struct amdgpu_virt *virt = &adev->virt;
312 struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
313 struct amdgpu_bo *bo;
319 for (i = data->last_reserved - 1; i >= 0; i--) {
320 bo = data->bps_bo[i];
322 amdgpu_bo_free_kernel(&bo, NULL, NULL);
323 data->bps_bo[i] = bo;
325 data->last_reserved = i;
329 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
331 struct amdgpu_virt *virt = &adev->virt;
332 struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
334 virt->ras_init_done = false;
339 amdgpu_virt_ras_release_bp(adev);
344 virt->virt_eh_data = NULL;
347 static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
348 struct eeprom_table_record *bps, int pages)
350 struct amdgpu_virt *virt = &adev->virt;
351 struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
356 memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
357 data->count += pages;
360 static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
362 struct amdgpu_virt *virt = &adev->virt;
363 struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
364 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
365 struct ttm_resource_manager *man = &mgr->manager;
366 struct amdgpu_bo *bo = NULL;
373 for (i = data->last_reserved; i < data->count; i++) {
374 bp = data->bps[i].retired_page;
376 /* There are two cases where a reserve error should be ignored:
377 * 1) a ras bad page has been allocated (used by someone);
378 * 2) a ras bad page has been reserved (duplicate error injection
381 if (ttm_resource_manager_used(man)) {
382 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
383 bp << AMDGPU_GPU_PAGE_SHIFT,
384 AMDGPU_GPU_PAGE_SIZE);
385 data->bps_bo[i] = NULL;
387 if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
388 AMDGPU_GPU_PAGE_SIZE,
390 DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
391 data->bps_bo[i] = bo;
393 data->last_reserved = i + 1;
398 static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
399 uint64_t retired_page)
401 struct amdgpu_virt *virt = &adev->virt;
402 struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
408 for (i = 0; i < data->count; i++)
409 if (retired_page == data->bps[i].retired_page)
415 static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
416 uint64_t bp_block_offset, uint32_t bp_block_size)
418 struct eeprom_table_record bp;
419 uint64_t retired_page;
420 uint32_t bp_idx, bp_cnt;
421 void *vram_usage_va = NULL;
423 if (adev->mman.fw_vram_usage_va)
424 vram_usage_va = adev->mman.fw_vram_usage_va;
426 vram_usage_va = adev->mman.drv_vram_usage_va;
428 memset(&bp, 0, sizeof(bp));
431 bp_cnt = bp_block_size / sizeof(uint64_t);
432 for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
433 retired_page = *(uint64_t *)(vram_usage_va +
434 bp_block_offset + bp_idx * sizeof(uint64_t));
435 bp.retired_page = retired_page;
437 if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
440 amdgpu_virt_ras_add_bps(adev, &bp, 1);
442 amdgpu_virt_ras_reserve_bps(adev);
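/* amdgpu_virt_read_pf2vf_data() - parse the pf2vf message left by the host.
 * The header size is sanity checked, the checksum is verified per header
 * version (v1 uses the hypervisor-provided checksum_key, v2 has no key yet,
 * see the TODO below), and the feature flags, register access flags,
 * multimedia bandwidth limits and vf2pf update interval are cached in
 * adev->virt.
 */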
447 static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
449 struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
456 if (adev->virt.fw_reserve.p_pf2vf == NULL)
459 if (pf2vf_info->size > 1024) {
460 dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
464 switch (pf2vf_info->version) {
466 checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
467 checkval = amd_sriov_msg_checksum(
468 adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
469 adev->virt.fw_reserve.checksum_key, checksum);
470 if (checksum != checkval) {
472 "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
477 adev->virt.gim_feature =
478 ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
481 /* TODO: missing key, need to add it later */
482 checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
483 checkval = amd_sriov_msg_checksum(
484 adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
486 if (checksum != checkval) {
488 "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
493 adev->virt.vf2pf_update_interval_ms =
494 ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
495 adev->virt.gim_feature =
496 ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
497 adev->virt.reg_access =
498 ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
500 adev->virt.decode_max_dimension_pixels = 0;
501 adev->virt.decode_max_frame_pixels = 0;
502 adev->virt.encode_max_dimension_pixels = 0;
503 adev->virt.encode_max_frame_pixels = 0;
504 adev->virt.is_mm_bw_enabled = false;
505 for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
506 tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
507 adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
509 tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
510 adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
512 tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
513 adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
515 tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
516 adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
518 if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
519 adev->virt.is_mm_bw_enabled = true;
522 ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
525 dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
529 /* correct a too large or too small interval value */
530 if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
531 adev->virt.vf2pf_update_interval_ms = 2000;
536 static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
538 struct amd_sriov_msg_vf2pf_info *vf2pf_info;
539 vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
541 if (adev->virt.fw_reserve.p_vf2pf == NULL)
544 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
545 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
546 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
547 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
548 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
549 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
550 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
551 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
552 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
553 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
554 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
555 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
556 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
557 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
558 adev->psp.asd_context.bin_desc.fw_version);
559 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
560 adev->psp.ras_context.context.bin_desc.fw_version);
561 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
562 adev->psp.xgmi_context.context.bin_desc.fw_version);
563 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
564 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
565 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
566 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
567 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
570 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
572 struct amd_sriov_msg_vf2pf_info *vf2pf_info;
574 vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
576 if (adev->virt.fw_reserve.p_vf2pf == NULL)
579 memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
581 vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
582 vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
585 if (THIS_MODULE->version != NULL)
586 strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
589 strcpy(vf2pf_info->driver_version, "N/A");
591 vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
592 vf2pf_info->driver_cert = 0;
593 vf2pf_info->os_info.all = 0;
595 vf2pf_info->fb_usage =
596 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
597 vf2pf_info->fb_vis_usage =
598 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
599 vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
600 vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
602 amdgpu_virt_populate_vf2pf_ucode_info(adev);
604 /* TODO: read dynamic info */
605 vf2pf_info->gfx_usage = 0;
606 vf2pf_info->compute_usage = 0;
607 vf2pf_info->encode_usage = 0;
608 vf2pf_info->decode_usage = 0;
610 vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
611 vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr;
613 if (adev->mes.resource_1) {
614 vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size;
616 vf2pf_info->checksum =
617 amd_sriov_msg_checksum(
618 vf2pf_info, sizeof(*vf2pf_info), 0, 0);
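	/* the sum above is taken over the entire struct with key 0; the checksum
	 * field itself still holds zero from the memset at the top, so it does
	 * not contribute to the result
	 */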
623 static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
625 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
628 ret = amdgpu_virt_read_pf2vf_data(adev);
630 adev->virt.vf2pf_update_retry_cnt++;
632 if ((amdgpu_virt_rcvd_ras_interrupt(adev) ||
633 adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
634 amdgpu_sriov_runtime(adev)) {
636 amdgpu_ras_set_fed(adev, true);
637 if (amdgpu_reset_domain_schedule(adev->reset_domain,
638 &adev->kfd.reset_work))
641 dev_err(adev->dev, "Failed to queue work! at %s", __func__);
647 adev->virt.vf2pf_update_retry_cnt = 0;
648 amdgpu_virt_write_vf2pf_data(adev);
651 schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
654 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
656 if (adev->virt.vf2pf_update_interval_ms != 0) {
657 DRM_INFO("clean up the vf2pf work item\n");
658 cancel_delayed_work_sync(&adev->virt.vf2pf_work);
659 adev->virt.vf2pf_update_interval_ms = 0;
663 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
665 adev->virt.fw_reserve.p_pf2vf = NULL;
666 adev->virt.fw_reserve.p_vf2pf = NULL;
667 adev->virt.vf2pf_update_interval_ms = 0;
668 adev->virt.vf2pf_update_retry_cnt = 0;
670 if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
671 DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
672 } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
673 /* go through this logic in ip_init and reset to initialize the workqueue */
674 amdgpu_virt_exchange_data(adev);
676 INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
677 schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
678 } else if (adev->bios != NULL) {
679 /* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
680 adev->virt.fw_reserve.p_pf2vf =
681 (struct amd_sriov_msg_pf2vf_info_header *)
682 (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
684 amdgpu_virt_read_pf2vf_data(adev);
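/* amdgpu_virt_exchange_data() - map and refresh the pf2vf/vf2pf message area.
 * The messages live in the VRAM region reserved by the host (fw_vram) or by
 * the driver (drv_vram), at the fixed AMD_SRIOV_MSG_PF2VF/VF2PF_OFFSET_KB
 * offsets; once the pointers are set, a pf2vf read and a vf2pf write are done
 * and any bad-page block advertised by a v2 header is processed.
 */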
689 void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
691 uint64_t bp_block_offset = 0;
692 uint32_t bp_block_size = 0;
693 struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
695 if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
696 if (adev->mman.fw_vram_usage_va) {
697 adev->virt.fw_reserve.p_pf2vf =
698 (struct amd_sriov_msg_pf2vf_info_header *)
699 (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
700 adev->virt.fw_reserve.p_vf2pf =
701 (struct amd_sriov_msg_vf2pf_info_header *)
702 (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
703 } else if (adev->mman.drv_vram_usage_va) {
704 adev->virt.fw_reserve.p_pf2vf =
705 (struct amd_sriov_msg_pf2vf_info_header *)
706 (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
707 adev->virt.fw_reserve.p_vf2pf =
708 (struct amd_sriov_msg_vf2pf_info_header *)
709 (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
712 amdgpu_virt_read_pf2vf_data(adev);
713 amdgpu_virt_write_vf2pf_data(adev);
715 /* bad page handling for version 2 */
716 if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
717 pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
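		/* the retired-page block offset is carried as two 32-bit halves */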
719 bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
720 ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
721 bp_block_size = pf2vf_v2->bp_block_size;
723 if (bp_block_size && !adev->virt.ras_init_done)
724 amdgpu_virt_init_ras_err_handler_data(adev);
726 if (adev->virt.ras_init_done)
727 amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
732 void amdgpu_detect_virtualization(struct amdgpu_device *adev)
736 switch (adev->asic_type) {
739 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
745 case CHIP_SIENNA_CICHLID:
748 case CHIP_IP_DISCOVERY:
749 reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
751 default: /* other chip doesn't support SRIOV */
757 adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
759 if (reg & 0x80000000)
760 adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
763 /* passthrough mode excludes sriov mode */
764 if (is_virtual_machine() && !xen_initial_domain())
765 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
768 /* we have the ability to check now */
769 if (amdgpu_sriov_vf(adev)) {
770 switch (adev->asic_type) {
773 vi_set_virt_ops(adev);
776 soc15_set_virt_ops(adev);
778 /* do not send GPU_INIT_DATA with MS_HYPERV */
779 if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
781 /* send a dummy GPU_INIT_DATA request to host on vega10 */
782 amdgpu_virt_request_init_data(adev);
787 soc15_set_virt_ops(adev);
791 case CHIP_SIENNA_CICHLID:
792 case CHIP_IP_DISCOVERY:
793 nv_set_virt_ops(adev);
794 /* try send GPU_INIT_DATA request to host */
795 amdgpu_virt_request_init_data(adev);
797 default: /* other chip doesn't support SRIOV */
798 DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
804 static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
806 return amdgpu_sriov_is_debug(adev);
809 static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
811 return amdgpu_sriov_is_normal(adev);
814 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
816 if (!amdgpu_sriov_vf(adev) ||
817 amdgpu_virt_access_debugfs_is_kiq(adev))
820 if (amdgpu_virt_access_debugfs_is_mmio(adev))
821 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
828 void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
830 if (amdgpu_sriov_vf(adev))
831 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
834 enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
836 enum amdgpu_sriov_vf_mode mode;
838 if (amdgpu_sriov_vf(adev)) {
839 if (amdgpu_sriov_is_pp_one_vf(adev))
840 mode = SRIOV_VF_MODE_ONE_VF;
842 mode = SRIOV_VF_MODE_MULTI_VF;
844 mode = SRIOV_VF_MODE_BARE_METAL;
850 void amdgpu_virt_post_reset(struct amdgpu_device *adev)
852 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
853 /* force the GFXOFF state after reset,
854 * to avoid invalid operations before GC is enabled
856 adev->gfx.is_poweron = false;
859 adev->mes.ring.sched.ready = false;
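/* amdgpu_virt_fw_load_skip_check() - decide whether the guest skips loading
 * @ucode_id under SRIOV. Returns true when the firmware should not be loaded
 * by the VF; the per-ASIC white/black lists below pick which firmwares that
 * applies to.
 */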
862 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
864 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
865 case IP_VERSION(13, 0, 0):
866 /* no vf autoload, white list */
867 if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
868 ucode_id == AMDGPU_UCODE_ID_VCN)
872 case IP_VERSION(11, 0, 9):
873 case IP_VERSION(11, 0, 7):
874 /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
875 if (ucode_id == AMDGPU_UCODE_ID_RLC_G
876 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
877 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
878 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
879 || ucode_id == AMDGPU_UCODE_ID_SMC)
883 case IP_VERSION(13, 0, 10):
885 if (ucode_id == AMDGPU_UCODE_ID_CAP
886 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
887 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
888 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
889 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
890 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
891 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
892 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
893 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
894 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
895 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
896 || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
897 || ucode_id == AMDGPU_UCODE_ID_CP_MES
898 || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
899 || ucode_id == AMDGPU_UCODE_ID_CP_MES1
900 || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
901 || ucode_id == AMDGPU_UCODE_ID_VCN1
902 || ucode_id == AMDGPU_UCODE_ID_VCN)
907 /* legacy black list */
908 if (ucode_id == AMDGPU_UCODE_ID_SDMA0
909 || ucode_id == AMDGPU_UCODE_ID_SDMA1
910 || ucode_id == AMDGPU_UCODE_ID_SDMA2
911 || ucode_id == AMDGPU_UCODE_ID_SDMA3
912 || ucode_id == AMDGPU_UCODE_ID_SDMA4
913 || ucode_id == AMDGPU_UCODE_ID_SDMA5
914 || ucode_id == AMDGPU_UCODE_ID_SDMA6
915 || ucode_id == AMDGPU_UCODE_ID_SDMA7
916 || ucode_id == AMDGPU_UCODE_ID_RLC_G
917 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
918 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
919 || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
920 || ucode_id == AMDGPU_UCODE_ID_SMC)
927 void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
928 struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
929 struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
933 if (!adev->virt.is_mm_bw_enabled)
937 for (i = 0; i < encode_array_size; i++) {
938 encode[i].max_width = adev->virt.encode_max_dimension_pixels;
939 encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
940 if (encode[i].max_width > 0)
941 encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
943 encode[i].max_height = 0;
948 for (i = 0; i < decode_array_size; i++) {
949 decode[i].max_width = adev->virt.decode_max_dimension_pixels;
950 decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
951 if (decode[i].max_width > 0)
952 decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
954 decode[i].max_height = 0;
959 bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
960 u32 acc_flags, u32 hwip,
961 bool write, u32 *rlcg_flag)
967 if (amdgpu_sriov_reg_indirect_gc(adev)) {
969 write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
971 /* only in the new version are AMDGPU_REGS_NO_KIQ and
972 * AMDGPU_REGS_RLC enabled simultaneously */
973 } else if ((acc_flags & AMDGPU_REGS_RLC) &&
974 !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
975 *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
980 if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
981 (acc_flags & AMDGPU_REGS_RLC) && write) {
982 *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
992 u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
994 struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
995 uint32_t timeout = 50000;
1004 if (!adev->gfx.rlc.rlcg_reg_access_supported) {
1006 "indirect registers access through rlcg is not available\n");
1010 if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
1011 dev_err(adev->dev, "invalid xcc\n");
1015 if (amdgpu_device_skip_hw_access(adev))
1018 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
1019 scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
1020 scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
1021 scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
1022 scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
1024 mutex_lock(&adev->virt.rlcg_reg_lock);
1026 if (reg_access_ctrl->spare_int)
1027 spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
1029 if (offset == reg_access_ctrl->grbm_cntl) {
1030 /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
1031 writel(v, scratch_reg2);
1032 if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1033 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1034 } else if (offset == reg_access_ctrl->grbm_idx) {
1035 /* if the target reg offset is grbm_idx, write to scratch_reg3 */
1036 writel(v, scratch_reg3);
1037 if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
1038 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1041 * SCRATCH_REG0 = read/write value
1042 * SCRATCH_REG1[30:28] = command
1043 * SCRATCH_REG1[19:0] = address in dword
1044 * SCRATCH_REG1[27:24] = Error reporting
1046 writel(v, scratch_reg0);
1047 writel((offset | flag), scratch_reg1);
1048 if (reg_access_ctrl->spare_int)
1049 writel(1, spare_int);
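		/* the request is complete once the address bits in SCRATCH_REG1
		 * clear; only then are the error flags and the SCRATCH_REG0
		 * result valid
		 */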
1051 for (i = 0; i < timeout; i++) {
1052 tmp = readl(scratch_reg1);
1053 if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
1058 tmp = readl(scratch_reg1);
1059 if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
1060 if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
1061 if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
1063 "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
1064 } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
1066 "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
1067 } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
1069 "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
1072 "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
1076 "timeout: rlcg faled to program reg: 0x%05x\n", offset);
1081 ret = readl(scratch_reg0);
1083 mutex_unlock(&adev->virt.rlcg_reg_lock);
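/* amdgpu_sriov_wreg()/amdgpu_sriov_rreg() - SRIOV-aware register accessors.
 * Outside exclusive-access (runtime) mode, accesses that qualify per
 * amdgpu_virt_get_rlcg_reg_access_flag() are routed through the RLCG scratch
 * register interface above; everything else falls back to NO_KIQ or KIQ-backed
 * MMIO.
 */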
1088 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
1089 u32 offset, u32 value,
1090 u32 acc_flags, u32 hwip, u32 xcc_id)
1094 if (amdgpu_device_skip_hw_access(adev))
1097 if (!amdgpu_sriov_runtime(adev) &&
1098 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
1099 amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
1103 if (acc_flags & AMDGPU_REGS_NO_KIQ)
1104 WREG32_NO_KIQ(offset, value);
1106 WREG32(offset, value);
1109 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
1110 u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
1114 if (amdgpu_device_skip_hw_access(adev))
1117 if (!amdgpu_sriov_runtime(adev) &&
1118 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
1119 return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
1121 if (acc_flags & AMDGPU_REGS_NO_KIQ)
1122 return RREG32_NO_KIQ(offset);
1124 return RREG32(offset);
1127 bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
1129 bool xnack_mode = true;
1131 if (amdgpu_sriov_vf(adev) &&
1132 amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))