Git Repo - linux.git/blobdiff - drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
Merge tag 'for-5.13/libata-2021-04-27' of git://git.kernel.dk/linux-block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 7d2c8b1698279cddb8e480401684ea62bf58e848..0ffdf847cad0a2e7c4575be981c8b72666c3c9dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
 static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
 {
        mutex_lock(&vm->eviction_lock);
-       vm->saved_flags = memalloc_nofs_save();
+       vm->saved_flags = memalloc_noreclaim_save();
 }
 
 static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
 {
        if (mutex_trylock(&vm->eviction_lock)) {
-               vm->saved_flags = memalloc_nofs_save();
+               vm->saved_flags = memalloc_noreclaim_save();
                return 1;
        }
        return 0;
@@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
 
 static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
 {
-       memalloc_nofs_restore(vm->saved_flags);
+       memalloc_noreclaim_restore(vm->saved_flags);
        mutex_unlock(&vm->eviction_lock);
 }
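
The two hunks above switch the eviction-lock helpers from memalloc_nofs_save(), which only blocks filesystem reclaim, to memalloc_noreclaim_save(), which sets PF_MEMALLOC so no direct reclaim at all can run while the eviction lock is held. As a hedged illustration of how the helpers are meant to be paired (the caller below is hypothetical, not code from this file):

        /* Illustrative caller: amdgpu_vm_eviction_trylock() returns 1 only if
         * the mutex was taken; between it and amdgpu_vm_eviction_unlock() any
         * allocation runs with reclaim disabled, so it cannot recurse into
         * memory reclaim and from there back into buffer eviction. */
        if (amdgpu_vm_eviction_trylock(vm)) {
                /* ... validate or evict the VM's page-table BOs ... */
                amdgpu_vm_eviction_unlock(vm);
        }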
 
@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        struct amdgpu_vm_bo_base *bo_base;
 
        if (vm->bulk_moveable) {
-               spin_lock(&ttm_bo_glob.lru_lock);
+               spin_lock(&adev->mman.bdev.lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-               spin_unlock(&ttm_bo_glob.lru_lock);
+               spin_unlock(&adev->mman.bdev.lru_lock);
                return;
        }
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
-       spin_lock(&ttm_bo_glob.lru_lock);
+       spin_lock(&adev->mman.bdev.lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
 
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
                                                &bo->shadow->tbo.mem,
                                                &vm->lru_bulk_move);
        }
-       spin_unlock(&ttm_bo_glob.lru_lock);
+       spin_unlock(&adev->mman.bdev.lru_lock);
 
        vm->bulk_moveable = true;
 }
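
These hunks follow TTM's move of the LRU lock from the removed global ttm_bo_glob into the per-device structure, reached here as adev->mman.bdev.lru_lock; the calls themselves are unchanged. A minimal sketch of the guarded fast path, repeated only to show the locking shape:

        /* Bulk-move fast path: when the per-VM LRU range is still valid, move
         * it to the LRU tail in one step under the device-local lock that
         * replaces the old global ttm_bo_glob.lru_lock. */
        spin_lock(&adev->mman.bdev.lru_lock);
        ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
        spin_unlock(&adev->mman.bdev.lru_lock);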
@@ -869,6 +869,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+       bp->bo_ptr_size = sizeof(struct amdgpu_bo);
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
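
The new bo_ptr_size field tells amdgpu_bo_create() how many bytes to allocate for the BO object; page-table BOs simply pass sizeof(struct amdgpu_bo), while callers that embed the BO in a larger wrapper can pass the wrapper's size. A sketch of that wrapper pattern, using a hypothetical type that is not part of the driver:

        /* Hypothetical wrapper: struct amdgpu_bo stays the first member so the
         * allocation can still be treated as a plain BO, and bo_ptr_size asks
         * amdgpu_bo_create() for the larger size up front. */
        struct my_bo_wrapper {
                struct amdgpu_bo bo;
                u64 extra_metadata;
        };

        bp->bo_ptr_size = sizeof(struct my_bo_wrapper);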
@@ -3300,7 +3301,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_bo *root;
        uint64_t value, flags;
        struct amdgpu_vm *vm;
-       long r;
+       int r;
 
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3350,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                value = 0;
        }
 
+       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       if (r) {
+               pr_debug("failed %d to reserve fence slot\n", r);
+               goto error_unlock;
+       }
+
        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
                                        addr, flags, value, NULL, NULL,
                                        NULL);
@@ -3360,7 +3367,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 error_unlock:
        amdgpu_bo_unreserve(root);
        if (r < 0)
-               DRM_ERROR("Can't handle page fault (%ld)\n", r);
+               DRM_ERROR("Can't handle page fault (%d)\n", r);
 
 error_unref:
        amdgpu_bo_unref(&root);
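
The final hunks make the page-fault handler reserve a shared fence slot on the root page directory's reservation object before the page-table update, so the fence added by the update path is guaranteed a free slot, and they narrow r from long to int to match the int return values it now stores. The general reserve-before-add shape, as a standalone sketch (resv stands for any locked reservation object, not a field added by this patch):

        /* Reserve room for one shared fence before anything downstream calls
         * dma_resv_add_shared_fence(); the reservation object must already be
         * locked, and -ENOMEM is returned if the slot array cannot grow. */
        int r = dma_resv_reserve_shared(resv, 1);
        if (r)
                return r;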