 		AMD_IP_BLOCK_TYPE_IH,
 	};
 
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		adev->ip_blocks[i].status.hw = false;
+
 	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
 		int j;
 		struct amdgpu_ip_block *block;
 
 		for (j = 0; j < adev->num_ip_blocks; j++) {
 			block = &adev->ip_blocks[j];
-			block->status.hw = false;
 
 			if (block->version->type != ip_order[i] ||
 			    !block->status.valid)
 				continue;
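
Why the reset moves: in the old placement the inner loop clears status.hw on every pass of the ip_order loop, so a block brought up on an earlier pass (GMC, say) reads as uninitialized again by the time the final IH pass finishes. A minimal userspace sketch of that interaction follows; the types are simplified stand-ins, not the real amdgpu structures:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the amdgpu structures (illustration only). */
enum ip_type { IP_GMC, IP_COMMON, IP_PSP, IP_IH };

struct ip_block {
	enum ip_type type;
	bool hw;			/* "hardware is initialized" flag */
};

int main(void)
{
	static const enum ip_type order[] = { IP_GMC, IP_COMMON, IP_PSP, IP_IH };
	struct ip_block blocks[] = {
		{ IP_IH, false }, { IP_PSP, false },
		{ IP_COMMON, false }, { IP_GMC, false },
	};
	const unsigned int nblocks = sizeof(blocks) / sizeof(blocks[0]);
	unsigned int i, j;

	for (i = 0; i < sizeof(order) / sizeof(order[0]); i++) {
		for (j = 0; j < nblocks; j++) {
			blocks[j].hw = false;	/* old placement: runs on every pass,
						 * clobbering blocks already brought up */
			if (blocks[j].type != order[i])
				continue;
			blocks[j].hw = true;	/* stands in for a successful hw_init() */
		}
	}

	/* Only the block matched on the last pass (IH) still reads as
	 * initialized; hoisting the reset above the outer loop fixes this. */
	for (j = 0; j < nblocks; j++)
		printf("block %u (type %d): hw = %s\n",
		       j, (int)blocks[j].type, blocks[j].hw ? "true" : "false");
	return 0;
}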
Two call sites then pick up the new ttm_manager_type() lookup. The first is the SR-IOV reset path, amdgpu_device_reset_sriov(), which recovers the GART before firmware loading:

 	amdgpu_virt_init_data_exchange(adev);
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
-	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
+	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
 
 	r = amdgpu_device_fw_loading(adev);
 	if (r)
 		return r;
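
ttm_manager_type() comes from the TTM refactor that moves resource managers out of TTM's fixed man[] array so drivers can register their own. At the point of this conversion the helper was essentially a thin wrapper over the old array indexing, which is what makes the change mechanical. The sketch below is an approximation of the 5.10-era header, so treat the exact type and field names as such:

/* Approximate shape of the accessor at the time of this conversion (the
 * real definition lives in the TTM headers; the manager type was later
 * renamed to struct ttm_resource_manager and the storage moved behind
 * driver-registered pointers). */
static inline struct ttm_mem_type_manager *
ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
{
	return &bdev->man[mem_type];
}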
The second is amdgpu_do_asic_reset(), which walks the list of devices being reset (hence tmp_adev) and bails to the shared error path on failure:

 				if (vram_lost) {
 					DRM_INFO("VRAM is lost due to GPU reset!\n");
 					amdgpu_inc_vram_lost(tmp_adev);
 				}
 
-				r = amdgpu_gtt_mgr_recover(
-					&tmp_adev->mman.bdev.man[TTM_PL_TT]);
+				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
 				if (r)
 					goto out;
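
amdgpu_inc_vram_lost() increments a per-device generation counter (an atomic_inc on adev->vram_lost_counter). Contexts snapshot the counter when they are created, and later submissions compare against it, so userspace learns that pre-reset VRAM contents are gone (amdgpu rejects such submissions with -ECANCELED). A self-contained userspace sketch of the pattern, with simplified stand-ins for the amdgpu types:

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-ins for the amdgpu device/context (illustration only). */
struct device_state {
	atomic_int vram_lost_counter;
};

struct context {
	struct device_state *dev;
	int vram_lost_snapshot;	/* taken when the context is created */
};

/* What amdgpu_inc_vram_lost() amounts to: bump the generation counter. */
static void mark_vram_lost(struct device_state *dev)
{
	atomic_fetch_add(&dev->vram_lost_counter, 1);
}

/* A stale snapshot means the context's VRAM contents predate a reset;
 * amdgpu's command submission rejects such contexts with -ECANCELED. */
static int context_submit(const struct context *ctx)
{
	if (atomic_load(&ctx->dev->vram_lost_counter) != ctx->vram_lost_snapshot)
		return -1;
	return 0;
}

int main(void)
{
	struct device_state dev = { .vram_lost_counter = 0 };
	struct context ctx = { .dev = &dev,
			       .vram_lost_snapshot = atomic_load(&dev.vram_lost_counter) };

	printf("submit before reset: %d\n", context_submit(&ctx));
	mark_vram_lost(&dev);	/* a GPU reset that lost VRAM */
	printf("submit after reset:  %d\n", context_submit(&ctx));
	return 0;
}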