void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
+ struct drm_device *ddev = adev_to_drm(adev);
+
/* enable virtual display */
if (adev->mode_info.num_crtc == 0)
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
- adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
+ ddev->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* @init: whether this is driver init time.
* When starting driver init/fini, full gpu access must be requested first.
* Return: Zero if the request succeeds, otherwise an error.
*/
/**
* amdgpu_virt_release_full_gpu() - release full gpu access
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* @init: whether this is driver init time.
* When driver init/fini finishes, full gpu access must be released.
* Return: Zero if the release succeeds, otherwise an error.
*/
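/*
 * Example (not part of this patch; helper name is hypothetical): a minimal
 * sketch of how the request/release pair brackets init-time hardware setup
 * on an SR-IOV VF, assuming the usual amdgpu_sriov_vf() guard.
 */
static int example_sriov_hw_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* ask the hypervisor for exclusive (full) GPU access */
	r = amdgpu_virt_request_full_gpu(adev, true);
	if (r)
		return r;

	/* ... init-time hardware programming goes here ... */

	/* hand exclusive access back once init is done */
	return amdgpu_virt_release_full_gpu(adev, true);
}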
/**
* amdgpu_virt_reset_gpu() - reset gpu
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* Send a reset command to the GPU hypervisor to reset the GPU that the VM is using.
* Return: Zero if the reset succeeds, otherwise an error.
*/
/**
* amdgpu_virt_wait_reset() - wait for gpu reset to complete
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* Wait for the GPU reset to complete.
* Return: Zero if the reset succeeds, otherwise an error.
*/
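/*
 * Example (not part of this patch; helper name is hypothetical): a VF
 * cannot reset the hardware itself, so a host-assisted reset is roughly
 * "ask the hypervisor, then wait for completion".
 */
static int example_sriov_reset(struct amdgpu_device *adev)
{
	int r;

	/* ask the hypervisor to reset the GPU backing this VM */
	r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* block until the hypervisor reports the reset has completed */
	return amdgpu_virt_wait_reset(adev);
}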
/**
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* The MM table is used by UVD and VCE for their initialization.
* Return: Zero if the allocation succeeds, otherwise an error.
*/
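/*
 * Example (not part of this patch; helper name is hypothetical): the MM
 * table only exists under SR-IOV, so allocation is typically guarded by
 * amdgpu_sriov_vf() and paired with amdgpu_virt_free_mm_table() at
 * teardown.
 */
static int example_mm_table_setup(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev))
		return 0;

	/* allocate the backing BO consumed by UVD/VCE init */
	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	/* ... UVD/VCE initialization that uses the table ... */
	return 0;
}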
/**
* amdgpu_virt_free_mm_table() - free mm table memory
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* Free MM table memory
*/
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
if (!*data)
return -ENOMEM;
- bps = kmalloc(align_space * sizeof((*data)->bps), GFP_KERNEL);
- bps_bo = kmalloc(align_space * sizeof((*data)->bps_bo), GFP_KERNEL);
+ bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
+ bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
if (!bps || !bps_bo) {
kfree(bps);
kfree(bps_bo);
kfree(*data);
return -ENOMEM;
}
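/*
 * Note (not part of this patch): kmalloc_array(n, size, flags) is the
 * overflow-checked form of kmalloc(n * size, flags). Roughly, it behaves
 * like the sketch below, returning NULL instead of allocating a too-small
 * buffer when n * size would overflow:
 *
 *	size_t bytes;
 *
 *	if (check_mul_overflow(n, size, &bytes))
 *		return NULL;
 *	return kmalloc(bytes, flags);
 */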
-void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+ int ret;
- amdgpu_virt_read_pf2vf_data(adev);
+ ret = amdgpu_virt_read_pf2vf_data(adev);
+ if (ret)
+ goto out;
amdgpu_virt_write_vf2pf_data(adev);
+out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
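/*
 * Context sketch (not part of this patch; helper name is hypothetical):
 * the handler above re-arms itself, so the init side only needs to bind
 * the work item to the handler and schedule the first run, assuming
 * vf2pf_update_interval_ms has already been set.
 */
static void example_start_vf2pf_exchange(struct amdgpu_device *adev)
{
	INIT_DELAYED_WORK(&adev->virt.vf2pf_work,
			  amdgpu_virt_update_vf2pf_work_item);
	/* convert ms to jiffies for the first run; the handler re-arms itself */
	schedule_delayed_work(&adev->virt.vf2pf_work,
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}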
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
DRM_INFO("clean up the vf2pf work item\n");
- flush_delayed_work(&adev->virt.vf2pf_work);
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
}
}