 		}
 	}
 	mb();
-	amdgpu_asic_flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
  * @adev: amdgpu_device pointer
  * @offset: offset into the GPU's gart aperture
  * @pages: number of pages to bind
- * @pagelist: pages to bind
  * @dma_addr: DMA addresses of pages
  * @flags: page table entry flags
  *
  * Returns 0 for success, -EINVAL for failure.
  */
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
-		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
+		     int pages, dma_addr_t *dma_addr,
 		     uint64_t flags)
 {
 	if (!adev->gart.ready) {
 	int i;
 	mb();
-	amdgpu_asic_flush_hdp(adev, NULL);
+	amdgpu_device_flush_hdp(adev, NULL);
 	for (i = 0; i < adev->num_vmhubs; i++)
 		amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
 }
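
For callers, the signature change above only drops the struct page array; the DMA address array and the PTE flags are passed exactly as before. A minimal sketch of an updated call site, assuming a caller along the lines of amdgpu_ttm_gart_bind() that previously forwarded ttm->pages (the gtt->offset, ttm->num_pages and gtt->ttm.dma_address names are illustrative and not taken from the hunks above):

	/* Illustrative only: with the pagelist argument removed, the
	 * caller hands over just the DMA addresses and PTE flags. */
	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
			     gtt->ttm.dma_address, flags);
	if (r)
		return r;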