linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43         uint64_t max_system_mem_limit;
44         uint64_t max_ttm_mem_limit;
45         int64_t system_mem_used;
46         int64_t ttm_mem_used;
47         spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49
50 static const char * const domain_bit_to_string[] = {
51                 "CPU",
52                 "GTT",
53                 "VRAM",
54                 "GDS",
55                 "GWS",
56                 "OA"
57 };
58
59 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
60
61 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
62
63 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
64                 struct kgd_mem *mem)
65 {
66         struct kfd_mem_attachment *entry;
67
68         list_for_each_entry(entry, &mem->attachments, list)
69                 if (entry->bo_va->base.vm == avm)
70                         return true;
71
72         return false;
73 }
74
75 /* Set memory usage limits. Currently, the limits are
76  *  System (TTM + userptr) memory - 15/16th System RAM
77  *  TTM memory - 3/8th System RAM
78  */
79 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
80 {
81         struct sysinfo si;
82         uint64_t mem;
83
84         si_meminfo(&si);
85         mem = si.freeram - si.freehigh;
86         mem *= si.mem_unit;
87
88         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
89         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
90         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
91         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
92                 (kfd_mem_limit.max_system_mem_limit >> 20),
93                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
94 }
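/*
 * Worked example of the limit math above (illustrative sketch only, not
 * used by the driver; the helper name is hypothetical): with 64 GiB of
 * usable system RAM the limits come out to 60 GiB of system memory and
 * 24 GiB of TTM memory for KFD.
 */
static inline void kfd_mem_limit_example(void)
{
        uint64_t mem = 64ULL << 30;                     /* 64 GiB usable RAM   */
        uint64_t system_limit = mem - (mem >> 4);       /* 15/16 -> 60 GiB     */
        uint64_t ttm_limit = (mem >> 1) - (mem >> 3);   /* 1/2 - 1/8 -> 24 GiB */

        pr_debug("example limits: system %lluM, TTM %lluM\n",
                 system_limit >> 20, ttm_limit >> 20);
}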
95
96 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
97 {
98         kfd_mem_limit.system_mem_used += size;
99 }
100
101 /* Estimate page table size needed to represent a given memory size
102  *
103  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
104  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
105  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
106  * for 2MB pages for TLB efficiency. However, small allocations and
107  * fragmented system memory still need some 4KB pages. We choose a
108  * compromise that should work in most cases without reserving too
109  * much memory for page tables unnecessarily (factor 16K, >> 14).
110  */
111 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
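/*
 * Illustrative sketch (hypothetical helper, not driver code): for 64 GiB
 * of managed memory the estimate above reserves 64 GiB >> 14 = 4 MiB for
 * page tables, between the 128 MiB a pure 4 KiB mapping would need
 * (>> 9) and the 256 KiB a pure 2 MiB mapping would need (>> 18).
 */
static inline void kfd_pt_estimate_example(void)
{
        uint64_t managed = 64ULL << 30;                 /* 64 GiB of memory */

        pr_debug("PT estimate %lluK (4K-only %lluK, 2M-only %lluK)\n",
                 ESTIMATE_PT_SIZE(managed) >> 10,
                 (managed >> 9) >> 10, (managed >> 18) >> 10);
}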
112
113 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
114 {
115         size >>= PAGE_SHIFT;
116         size *= sizeof(dma_addr_t) + sizeof(void *);
117
118         return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
119                 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
120                 PAGE_ALIGN(size);
121 }
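/*
 * Worked example (illustrative, assuming a 64-bit build where
 * sizeof(dma_addr_t) == sizeof(void *) == 8): a 2 MiB BO has 512 pages,
 * so the per-page bookkeeping above is 512 * 16 = 8 KiB, plus the
 * power-of-two-rounded sizes of struct amdgpu_bo and struct ttm_tt.
 */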
122
123 /**
124  * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
125  * of buffer including any reserved for control structures
126  *
127  * @adev: Device to which the allocated BO belongs
128  * @size: Size of buffer, in bytes, encapsulated by BO. This should be
129  * equivalent to amdgpu_bo_size(BO)
130  * @alloc_flag: Flag used in allocating a BO as noted above
131  *
132  * Return: 0 if successful, -ENOMEM otherwise
133  */
134 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
135                 uint64_t size, u32 alloc_flag)
136 {
137         uint64_t reserved_for_pt =
138                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
139         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
140         int ret = 0;
141
142         acc_size = amdgpu_amdkfd_acc_size(size);
143
144         vram_needed = 0;
145         if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
146                 system_mem_needed = acc_size + size;
147                 ttm_mem_needed = acc_size + size;
148         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
149                 system_mem_needed = acc_size;
150                 ttm_mem_needed = acc_size;
151                 vram_needed = size;
152         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
153                 system_mem_needed = acc_size + size;
154                 ttm_mem_needed = acc_size;
155         } else if (alloc_flag &
156                    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
157                     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
158                 system_mem_needed = acc_size;
159                 ttm_mem_needed = acc_size;
160         } else {
161                 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
162                 return -ENOMEM;
163         }
164
165         spin_lock(&kfd_mem_limit.mem_limit_lock);
166
167         if (kfd_mem_limit.system_mem_used + system_mem_needed >
168             kfd_mem_limit.max_system_mem_limit)
169                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
170
171         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
172              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
173             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
174              kfd_mem_limit.max_ttm_mem_limit) ||
175             (adev->kfd.vram_used + vram_needed >
176              adev->gmc.real_vram_size - reserved_for_pt)) {
177                 ret = -ENOMEM;
178                 goto release;
179         }
180
181         /* Update memory accounting by decreasing available system
182          * memory, TTM memory and GPU memory as computed above
183          */
184         adev->kfd.vram_used += vram_needed;
185         kfd_mem_limit.system_mem_used += system_mem_needed;
186         kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
187
188 release:
189         spin_unlock(&kfd_mem_limit.mem_limit_lock);
190         return ret;
191 }
192
193 static void unreserve_mem_limit(struct amdgpu_device *adev,
194                 uint64_t size, u32 alloc_flag)
195 {
196         size_t acc_size;
197
198         acc_size = amdgpu_amdkfd_acc_size(size);
199
200         spin_lock(&kfd_mem_limit.mem_limit_lock);
201
202         if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
203                 kfd_mem_limit.system_mem_used -= (acc_size + size);
204                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
205         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
206                 kfd_mem_limit.system_mem_used -= acc_size;
207                 kfd_mem_limit.ttm_mem_used -= acc_size;
208                 adev->kfd.vram_used -= size;
209         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
210                 kfd_mem_limit.system_mem_used -= (acc_size + size);
211                 kfd_mem_limit.ttm_mem_used -= acc_size;
212         } else if (alloc_flag &
213                    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
214                     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
215                 kfd_mem_limit.system_mem_used -= acc_size;
216                 kfd_mem_limit.ttm_mem_used -= acc_size;
217         } else {
218                 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
219                 goto release;
220         }
221
222         WARN_ONCE(adev->kfd.vram_used < 0,
223                   "KFD VRAM memory accounting unbalanced");
224         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
225                   "KFD TTM memory accounting unbalanced");
226         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
227                   "KFD system memory accounting unbalanced");
228
229 release:
230         spin_unlock(&kfd_mem_limit.mem_limit_lock);
231 }
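/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * every successful amdgpu_amdkfd_reserve_mem_limit() call must be
 * balanced by an unreserve_mem_limit() call with the same size and
 * alloc flag, typically on the allocation error path or at BO release
 * (see amdgpu_amdkfd_release_notify() below).
 */
static inline int kfd_mem_accounting_example(struct amdgpu_device *adev,
                                             uint64_t size)
{
        int ret;

        ret = amdgpu_amdkfd_reserve_mem_limit(adev, size,
                                              KFD_IOC_ALLOC_MEM_FLAGS_GTT);
        if (ret)
                return ret;

        /* ... create and map the GTT BO here; if that fails: ... */

        unreserve_mem_limit(adev, size, KFD_IOC_ALLOC_MEM_FLAGS_GTT);
        return 0;
}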
232
233 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
234 {
235         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
236         u32 alloc_flags = bo->kfd_bo->alloc_flags;
237         u64 size = amdgpu_bo_size(bo);
238
239         unreserve_mem_limit(adev, size, alloc_flags);
240
241         kfree(bo->kfd_bo);
242 }
243
244 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
245  *  reservation object.
246  *
247  * @bo: [IN] Remove eviction fence(s) from this BO
248  * @ef: [IN] This eviction fence is removed if it
249  *  is present in the shared list.
250  *
251  * NOTE: Must be called with the BO reserved, i.e. bo->tbo.base.resv held.
252  */
253 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
254                                         struct amdgpu_amdkfd_fence *ef)
255 {
256         struct dma_resv *resv = bo->tbo.base.resv;
257         struct dma_resv_list *old, *new;
258         unsigned int i, j, k;
259
260         if (!ef)
261                 return -EINVAL;
262
263         old = dma_resv_shared_list(resv);
264         if (!old)
265                 return 0;
266
267         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
268         if (!new)
269                 return -ENOMEM;
270
271         /* Go through all the shared fences in the reservation object and sort
272          * the interesting ones to the end of the list.
273          */
274         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
275                 struct dma_fence *f;
276
277                 f = rcu_dereference_protected(old->shared[i],
278                                               dma_resv_held(resv));
279
280                 if (f->context == ef->base.context)
281                         RCU_INIT_POINTER(new->shared[--j], f);
282                 else
283                         RCU_INIT_POINTER(new->shared[k++], f);
284         }
285         new->shared_max = old->shared_max;
286         new->shared_count = k;
287
288         /* Install the new fence list, seqcount provides the barriers */
289         write_seqcount_begin(&resv->seq);
290         RCU_INIT_POINTER(resv->fence, new);
291         write_seqcount_end(&resv->seq);
292
293         /* Drop the references to the removed fences */
294         for (i = j; i < old->shared_count; ++i) {
295                 struct dma_fence *f;
296
297                 f = rcu_dereference_protected(new->shared[i],
298                                               dma_resv_held(resv));
299                 dma_fence_put(f);
300         }
301         kfree_rcu(old, rcu);
302
303         return 0;
304 }
305
306 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
307 {
308         struct amdgpu_bo *root = bo;
309         struct amdgpu_vm_bo_base *vm_bo;
310         struct amdgpu_vm *vm;
311         struct amdkfd_process_info *info;
312         struct amdgpu_amdkfd_fence *ef;
313         int ret;
314
315         /* We can always get vm_bo from the root PD BO. */
316         while (root->parent)
317                 root = root->parent;
318
319         vm_bo = root->vm_bo;
320         if (!vm_bo)
321                 return 0;
322
323         vm = vm_bo->vm;
324         if (!vm)
325                 return 0;
326
327         info = vm->process_info;
328         if (!info || !info->eviction_fence)
329                 return 0;
330
331         ef = container_of(dma_fence_get(&info->eviction_fence->base),
332                         struct amdgpu_amdkfd_fence, base);
333
334         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
335         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
336         dma_resv_unlock(bo->tbo.base.resv);
337
338         dma_fence_put(&ef->base);
339         return ret;
340 }
341
342 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
343                                      bool wait)
344 {
345         struct ttm_operation_ctx ctx = { false, false };
346         int ret;
347
348         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
349                  "Called with userptr BO"))
350                 return -EINVAL;
351
352         amdgpu_bo_placement_from_domain(bo, domain);
353
354         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
355         if (ret)
356                 goto validate_fail;
357         if (wait)
358                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
359
360 validate_fail:
361         return ret;
362 }
363
364 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
365 {
366         return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
367 }
368
369 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
370  *
371  * Page directories are not updated here because huge page handling
372  * during page table updates can invalidate page directory entries
373  * again. Page directories are only updated after updating page
374  * tables.
375  */
376 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
377 {
378         struct amdgpu_bo *pd = vm->root.bo;
379         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
380         int ret;
381
382         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
383         if (ret) {
384                 pr_err("failed to validate PT BOs\n");
385                 return ret;
386         }
387
388         ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
389         if (ret) {
390                 pr_err("failed to validate PD\n");
391                 return ret;
392         }
393
394         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
395
396         if (vm->use_cpu_for_update) {
397                 ret = amdgpu_bo_kmap(pd, NULL);
398                 if (ret) {
399                         pr_err("failed to kmap PD, ret=%d\n", ret);
400                         return ret;
401                 }
402         }
403
404         return 0;
405 }
406
407 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
408 {
409         struct amdgpu_bo *pd = vm->root.bo;
410         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
411         int ret;
412
413         ret = amdgpu_vm_update_pdes(adev, vm, false);
414         if (ret)
415                 return ret;
416
417         return amdgpu_sync_fence(sync, vm->last_update);
418 }
419
420 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
421 {
422         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
423         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
424         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
425         uint32_t mapping_flags;
426         uint64_t pte_flags;
427         bool snoop = false;
428
429         mapping_flags = AMDGPU_VM_PAGE_READABLE;
430         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
431                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
432         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
433                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
434
435         switch (adev->asic_type) {
436         case CHIP_ARCTURUS:
437                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
438                         if (bo_adev == adev)
439                                 mapping_flags |= coherent ?
440                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
441                         else
442                                 mapping_flags |= coherent ?
443                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
444                 } else {
445                         mapping_flags |= coherent ?
446                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
447                 }
448                 break;
449         case CHIP_ALDEBARAN:
450                 if (coherent && uncached) {
451                         if (adev->gmc.xgmi.connected_to_cpu ||
452                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
453                                 snoop = true;
454                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
455                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
456                         if (bo_adev == adev) {
457                                 mapping_flags |= coherent ?
458                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
459                                 if (adev->gmc.xgmi.connected_to_cpu)
460                                         snoop = true;
461                         } else {
462                                 mapping_flags |= coherent ?
463                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
464                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
465                                         snoop = true;
466                         }
467                 } else {
468                         snoop = true;
469                         mapping_flags |= coherent ?
470                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
471                 }
472                 break;
473         default:
474                 mapping_flags |= coherent ?
475                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
476         }
477
478         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
479         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
480
481         return pte_flags;
482 }
483
484 static int
485 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
486                        struct kfd_mem_attachment *attachment)
487 {
488         enum dma_data_direction direction =
489                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
490                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
491         struct ttm_operation_ctx ctx = {.interruptible = true};
492         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
493         struct amdgpu_device *adev = attachment->adev;
494         struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
495         struct ttm_tt *ttm = bo->tbo.ttm;
496         int ret;
497
498         ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
499         if (unlikely(!ttm->sg))
500                 return -ENOMEM;
501
502         if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) {
503                 ret = -EINVAL;
                goto free_sg;
        }
504
505         /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
506         ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
507                                         ttm->num_pages, 0,
508                                         (u64)ttm->num_pages << PAGE_SHIFT,
509                                         GFP_KERNEL);
510         if (unlikely(ret))
511                 goto free_sg;
512
513         ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
514         if (unlikely(ret))
515                 goto release_sg;
516
517         drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
518                                        ttm->num_pages);
519
520         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
521         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
522         if (ret)
523                 goto unmap_sg;
524
525         return 0;
526
527 unmap_sg:
528         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
529 release_sg:
530         pr_err("DMA map userptr failed: %d\n", ret);
531         sg_free_table(ttm->sg);
532 free_sg:
533         kfree(ttm->sg);
534         ttm->sg = NULL;
535         return ret;
536 }
537
538 static int
539 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
540 {
541         struct ttm_operation_ctx ctx = {.interruptible = true};
542         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
543
544         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
545         return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
546 }
547
548 static int
549 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
550                           struct kfd_mem_attachment *attachment)
551 {
552         switch (attachment->type) {
553         case KFD_MEM_ATT_SHARED:
554                 return 0;
555         case KFD_MEM_ATT_USERPTR:
556                 return kfd_mem_dmamap_userptr(mem, attachment);
557         case KFD_MEM_ATT_DMABUF:
558                 return kfd_mem_dmamap_dmabuf(attachment);
559         default:
560                 WARN_ON_ONCE(1);
561         }
562         return -EINVAL;
563 }
564
565 static void
566 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
567                          struct kfd_mem_attachment *attachment)
568 {
569         enum dma_data_direction direction =
570                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
571                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
572         struct ttm_operation_ctx ctx = {.interruptible = false};
573         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
574         struct amdgpu_device *adev = attachment->adev;
575         struct ttm_tt *ttm = bo->tbo.ttm;
576
577         if (unlikely(!ttm->sg))
578                 return;
579
580         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
581         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
582
583         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
584         sg_free_table(ttm->sg);
585         kfree(ttm->sg);
586         ttm->sg = NULL;
587 }
588
589 static void
590 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
591 {
592         struct ttm_operation_ctx ctx = {.interruptible = true};
593         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
594
595         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
596         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
597 }
598
599 static void
600 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
601                             struct kfd_mem_attachment *attachment)
602 {
603         switch (attachment->type) {
604         case KFD_MEM_ATT_SHARED:
605                 break;
606         case KFD_MEM_ATT_USERPTR:
607                 kfd_mem_dmaunmap_userptr(mem, attachment);
608                 break;
609         case KFD_MEM_ATT_DMABUF:
610                 kfd_mem_dmaunmap_dmabuf(attachment);
611                 break;
612         default:
613                 WARN_ON_ONCE(1);
614         }
615 }
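/*
 * Illustrative sketch (hypothetical helper): DMA mapping and unmapping
 * are driven per attachment and must stay paired. In the driver this
 * pairing is done by update_gpuvm_pte() (map before updating PTEs) and
 * unmap_bo_from_gpuvm() (unmap after clearing them), further below.
 */
static inline int kfd_mem_dma_example(struct kgd_mem *mem,
                                      struct kfd_mem_attachment *entry)
{
        int ret;

        ret = kfd_mem_dmamap_attachment(mem, entry);
        if (ret)
                return ret;

        /* ... GPU accesses the BO through this attachment's mapping ... */

        kfd_mem_dmaunmap_attachment(mem, entry);
        return 0;
}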
616
617 static int
618 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
619                        struct amdgpu_bo **bo)
620 {
621         unsigned long bo_size = mem->bo->tbo.base.size;
622         struct drm_gem_object *gobj;
623         int ret;
624
625         ret = amdgpu_bo_reserve(mem->bo, false);
626         if (ret)
627                 return ret;
628
629         ret = amdgpu_gem_object_create(adev, bo_size, 1,
630                                        AMDGPU_GEM_DOMAIN_CPU,
631                                        AMDGPU_GEM_CREATE_PREEMPTIBLE,
632                                        ttm_bo_type_sg, mem->bo->tbo.base.resv,
633                                        &gobj);
634         amdgpu_bo_unreserve(mem->bo);
635         if (ret)
636                 return ret;
637
638         *bo = gem_to_amdgpu_bo(gobj);
639         (*bo)->parent = amdgpu_bo_ref(mem->bo);
640
641         return 0;
642 }
643
644 static int
645 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
646                       struct amdgpu_bo **bo)
647 {
648         struct drm_gem_object *gobj;
649         int ret;
650
651         if (!mem->dmabuf) {
652                 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
653                         mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
654                                 DRM_RDWR : 0);
655                 if (IS_ERR(mem->dmabuf)) {
656                         ret = PTR_ERR(mem->dmabuf);
657                         mem->dmabuf = NULL;
658                         return ret;
659                 }
660         }
661
662         gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
663         if (IS_ERR(gobj))
664                 return PTR_ERR(gobj);
665
666         *bo = gem_to_amdgpu_bo(gobj);
667         (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
668         (*bo)->parent = amdgpu_bo_ref(mem->bo);
669
670         return 0;
671 }
672
673 /* kfd_mem_attach - Add a BO to a VM
674  *
675  * Everything that needs to be done only once when a BO is first added
676  * to a VM. It can later be mapped and unmapped many times without
677  * repeating these steps.
678  *
679  * 0. Create BO for DMA mapping, if needed
680  * 1. Allocate and initialize BO VA entry data structure
681  * 2. Add BO to the VM
682  * 3. Determine ASIC-specific PTE flags
683  * 4. Alloc page tables and directories if needed
684  * 4a.  Validate new page tables and directories
685  */
686 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
687                 struct amdgpu_vm *vm, bool is_aql)
688 {
689         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
690         unsigned long bo_size = mem->bo->tbo.base.size;
691         uint64_t va = mem->va;
692         struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
693         struct amdgpu_bo *bo[2] = {NULL, NULL};
694         int i, ret;
695
696         if (!va) {
697                 pr_err("Invalid VA when adding BO to VM\n");
698                 return -EINVAL;
699         }
700
701         for (i = 0; i <= is_aql; i++) {
702                 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
703                 if (unlikely(!attachment[i])) {
704                         ret = -ENOMEM;
705                         goto unwind;
706                 }
707
708                 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
709                          va + bo_size, vm);
710
711                 if (adev == bo_adev ||
712                    (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && adev->ram_is_direct_mapped) ||
713                    (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && amdgpu_xgmi_same_hive(adev, bo_adev))) {
714                         /* Mappings on the local GPU, or VRAM mappings in the
715                          * local XGMI hive, or userptr mappings when the IOMMU is in
716                          * direct map mode, all share the original BO
717                          */
718                         attachment[i]->type = KFD_MEM_ATT_SHARED;
719                         bo[i] = mem->bo;
720                         drm_gem_object_get(&bo[i]->tbo.base);
721                 } else if (i > 0) {
722                         /* Multiple mappings on the same GPU share the BO */
723                         attachment[i]->type = KFD_MEM_ATT_SHARED;
724                         bo[i] = bo[0];
725                         drm_gem_object_get(&bo[i]->tbo.base);
726                 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
727                         /* Create an SG BO to DMA-map userptrs on other GPUs */
728                         attachment[i]->type = KFD_MEM_ATT_USERPTR;
729                         ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
730                         if (ret)
731                                 goto unwind;
732                 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
733                            mem->bo->tbo.type != ttm_bo_type_sg) {
734                         /* GTT BOs use DMA-mapping ability of dynamic-attach
735                          * DMA bufs. TODO: The same should work for VRAM on
736                          * large-BAR GPUs.
737                          */
738                         attachment[i]->type = KFD_MEM_ATT_DMABUF;
739                         ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
740                         if (ret)
741                                 goto unwind;
742                 } else {
743                         /* FIXME: Need to DMA-map other BO types:
744                          * large-BAR VRAM, doorbells, MMIO remap
745                          */
746                         attachment[i]->type = KFD_MEM_ATT_SHARED;
747                         bo[i] = mem->bo;
748                         drm_gem_object_get(&bo[i]->tbo.base);
749                 }
750
751                 /* Add BO to VM internal data structures */
752                 ret = amdgpu_bo_reserve(bo[i], false);
753                 if (ret) {
754                         pr_debug("Unable to reserve BO during memory attach");
755                         goto unwind;
756                 }
757                 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
758                 amdgpu_bo_unreserve(bo[i]);
759                 if (unlikely(!attachment[i]->bo_va)) {
760                         ret = -ENOMEM;
761                         pr_err("Failed to add BO object to VM. ret == %d\n",
762                                ret);
763                         goto unwind;
764                 }
765                 attachment[i]->va = va;
766                 attachment[i]->pte_flags = get_pte_flags(adev, mem);
767                 attachment[i]->adev = adev;
768                 list_add(&attachment[i]->list, &mem->attachments);
769
770                 va += bo_size;
771         }
772
773         return 0;
774
775 unwind:
776         for (; i >= 0; i--) {
777                 if (!attachment[i])
778                         continue;
779                 if (attachment[i]->bo_va) {
780                         amdgpu_bo_reserve(bo[i], true);
781                         amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
782                         amdgpu_bo_unreserve(bo[i]);
783                         list_del(&attachment[i]->list);
784                 }
785                 if (bo[i])
786                         drm_gem_object_put(&bo[i]->tbo.base);
787                 kfree(attachment[i]);
788         }
789         return ret;
790 }
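/*
 * Illustrative sketch (hypothetical caller, simplified error handling)
 * of the lifecycle described above: a BO is attached to a VM once, can
 * then be mapped and unmapped repeatedly, and the attachment is removed
 * with kfd_mem_detach() when the BO is freed from that VM.
 */
static inline int kfd_mem_attach_example(struct amdgpu_device *adev,
                                         struct kgd_mem *mem,
                                         struct amdgpu_vm *avm)
{
        int ret;

        if (kfd_mem_is_attached(avm, mem))
                return 0;

        ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
        if (ret)
                return ret;

        /* ... map_bo_to_gpuvm()/unmap_bo_from_gpuvm() per mapping ... */
        return 0;
}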
791
792 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
793 {
794         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
795
796         pr_debug("\t remove VA 0x%llx in entry %p\n",
797                         attachment->va, attachment);
798         amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
799         drm_gem_object_put(&bo->tbo.base);
800         list_del(&attachment->list);
801         kfree(attachment);
802 }
803
804 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
805                                 struct amdkfd_process_info *process_info,
806                                 bool userptr)
807 {
808         struct ttm_validate_buffer *entry = &mem->validate_list;
809         struct amdgpu_bo *bo = mem->bo;
810
811         INIT_LIST_HEAD(&entry->head);
812         entry->num_shared = 1;
813         entry->bo = &bo->tbo;
814         mutex_lock(&process_info->lock);
815         if (userptr)
816                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
817         else
818                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
819         mutex_unlock(&process_info->lock);
820 }
821
822 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
823                 struct amdkfd_process_info *process_info)
824 {
825         struct ttm_validate_buffer *bo_list_entry;
826
827         bo_list_entry = &mem->validate_list;
828         mutex_lock(&process_info->lock);
829         list_del(&bo_list_entry->head);
830         mutex_unlock(&process_info->lock);
831 }
832
833 /* Initializes user pages. It registers the MMU notifier and validates
834  * the userptr BO in the GTT domain.
835  *
836  * The BO must already be on the userptr_valid_list. Otherwise an
837  * eviction and restore may happen that leaves the new BO unmapped
838  * with the user mode queues running.
839  *
840  * Takes the process_info->lock to protect against concurrent restore
841  * workers.
842  *
843  * Returns 0 for success, negative errno for errors.
844  */
845 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
846                            bool criu_resume)
847 {
848         struct amdkfd_process_info *process_info = mem->process_info;
849         struct amdgpu_bo *bo = mem->bo;
850         struct ttm_operation_ctx ctx = { true, false };
851         int ret = 0;
852
853         mutex_lock(&process_info->lock);
854
855         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
856         if (ret) {
857                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
858                 goto out;
859         }
860
861         ret = amdgpu_mn_register(bo, user_addr);
862         if (ret) {
863                 pr_err("%s: Failed to register MMU notifier: %d\n",
864                        __func__, ret);
865                 goto out;
866         }
867
868         if (criu_resume) {
869                 /*
870                  * During a CRIU restore operation, the userptr buffer objects
871                  * will be validated in the restore_userptr_work worker at a
872                  * later stage when it is scheduled by another ioctl called by
873                  * CRIU master process for the target pid for restore.
874                  */
875                 atomic_inc(&mem->invalid);
876                 mutex_unlock(&process_info->lock);
877                 return 0;
878         }
879
880         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
881         if (ret) {
882                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
883                 goto unregister_out;
884         }
885
886         ret = amdgpu_bo_reserve(bo, true);
887         if (ret) {
888                 pr_err("%s: Failed to reserve BO\n", __func__);
889                 goto release_out;
890         }
891         amdgpu_bo_placement_from_domain(bo, mem->domain);
892         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
893         if (ret)
894                 pr_err("%s: failed to validate BO\n", __func__);
895         amdgpu_bo_unreserve(bo);
896
897 release_out:
898         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
899 unregister_out:
900         if (ret)
901                 amdgpu_mn_unregister(bo);
902 out:
903         mutex_unlock(&process_info->lock);
904         return ret;
905 }
906
907 /* Reserving a BO and its page table BOs must happen atomically to
908  * avoid deadlocks. Some operations update multiple VMs at once. Track
909  * all the reservation info in a context structure. Optionally a sync
910  * object can track VM updates.
911  */
912 struct bo_vm_reservation_context {
913         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
914         unsigned int n_vms;                 /* Number of VMs reserved       */
915         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
916         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
917         struct list_head list, duplicates;  /* BO lists                     */
918         struct amdgpu_sync *sync;           /* Pointer to sync object       */
919         bool reserved;                      /* Whether BOs are reserved     */
920 };
921
922 enum bo_vm_match {
923         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
924         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
925         BO_VM_ALL,              /* Match all VMs a BO was added to    */
926 };
927
928 /**
929  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
930  * @mem: KFD BO structure.
931  * @vm: the VM to reserve.
932  * @ctx: the struct that will be used in unreserve_bo_and_vms().
933  */
934 static int reserve_bo_and_vm(struct kgd_mem *mem,
935                               struct amdgpu_vm *vm,
936                               struct bo_vm_reservation_context *ctx)
937 {
938         struct amdgpu_bo *bo = mem->bo;
939         int ret;
940
941         WARN_ON(!vm);
942
943         ctx->reserved = false;
944         ctx->n_vms = 1;
945         ctx->sync = &mem->sync;
946
947         INIT_LIST_HEAD(&ctx->list);
948         INIT_LIST_HEAD(&ctx->duplicates);
949
950         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
951         if (!ctx->vm_pd)
952                 return -ENOMEM;
953
954         ctx->kfd_bo.priority = 0;
955         ctx->kfd_bo.tv.bo = &bo->tbo;
956         ctx->kfd_bo.tv.num_shared = 1;
957         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
958
959         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
960
961         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
962                                      false, &ctx->duplicates);
963         if (ret) {
964                 pr_err("Failed to reserve buffers in ttm.\n");
965                 kfree(ctx->vm_pd);
966                 ctx->vm_pd = NULL;
967                 return ret;
968         }
969
970         ctx->reserved = true;
971         return 0;
972 }
973
974 /**
975  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
976  * @mem: KFD BO structure.
977  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
978  * are used. Otherwise, only the given VM is reserved.
979  * @map_type: the mapping status that will be used to filter the VMs.
980  * @ctx: the struct that will be used in unreserve_bo_and_vms().
981  *
982  * Returns 0 for success, negative for failure.
983  */
984 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
985                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
986                                 struct bo_vm_reservation_context *ctx)
987 {
988         struct amdgpu_bo *bo = mem->bo;
989         struct kfd_mem_attachment *entry;
990         unsigned int i;
991         int ret;
992
993         ctx->reserved = false;
994         ctx->n_vms = 0;
995         ctx->vm_pd = NULL;
996         ctx->sync = &mem->sync;
997
998         INIT_LIST_HEAD(&ctx->list);
999         INIT_LIST_HEAD(&ctx->duplicates);
1000
1001         list_for_each_entry(entry, &mem->attachments, list) {
1002                 if ((vm && vm != entry->bo_va->base.vm) ||
1003                         (entry->is_mapped != map_type
1004                         && map_type != BO_VM_ALL))
1005                         continue;
1006
1007                 ctx->n_vms++;
1008         }
1009
1010         if (ctx->n_vms != 0) {
1011                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
1012                                      GFP_KERNEL);
1013                 if (!ctx->vm_pd)
1014                         return -ENOMEM;
1015         }
1016
1017         ctx->kfd_bo.priority = 0;
1018         ctx->kfd_bo.tv.bo = &bo->tbo;
1019         ctx->kfd_bo.tv.num_shared = 1;
1020         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
1021
1022         i = 0;
1023         list_for_each_entry(entry, &mem->attachments, list) {
1024                 if ((vm && vm != entry->bo_va->base.vm) ||
1025                         (entry->is_mapped != map_type
1026                         && map_type != BO_VM_ALL))
1027                         continue;
1028
1029                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1030                                 &ctx->vm_pd[i]);
1031                 i++;
1032         }
1033
1034         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1035                                      false, &ctx->duplicates);
1036         if (ret) {
1037                 pr_err("Failed to reserve buffers in ttm.\n");
1038                 kfree(ctx->vm_pd);
1039                 ctx->vm_pd = NULL;
1040                 return ret;
1041         }
1042
1043         ctx->reserved = true;
1044         return 0;
1045 }
1046
1047 /**
1048  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1049  * @ctx: Reservation context to unreserve
1050  * @wait: Optionally wait for a sync object representing pending VM updates
1051  * @intr: Whether the wait is interruptible
1052  *
1053  * Also frees any resources allocated in
1054  * reserve_bo_and_(cond_)vm(s). Returns the status from
1055  * amdgpu_sync_wait.
1056  */
1057 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1058                                  bool wait, bool intr)
1059 {
1060         int ret = 0;
1061
1062         if (wait)
1063                 ret = amdgpu_sync_wait(ctx->sync, intr);
1064
1065         if (ctx->reserved)
1066                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1067         kfree(ctx->vm_pd);
1068
1069         ctx->sync = NULL;
1070
1071         ctx->reserved = false;
1072         ctx->vm_pd = NULL;
1073
1074         return ret;
1075 }
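/*
 * Illustrative sketch (hypothetical helper): the reservation context is
 * meant to be used bracket-style, e.g. reserve all VMs where a BO is
 * mapped, update them, then unreserve and optionally wait for the sync
 * object accumulated in the context.
 */
static inline int kfd_mem_reservation_example(struct kgd_mem *mem)
{
        struct bo_vm_reservation_context ctx;
        int ret;

        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_MAPPED, &ctx);
        if (ret)
                return ret;

        /* ... touch page tables of every VM the BO is mapped in ... */

        return unreserve_bo_and_vms(&ctx, true, false);
}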
1076
1077 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1078                                 struct kfd_mem_attachment *entry,
1079                                 struct amdgpu_sync *sync)
1080 {
1081         struct amdgpu_bo_va *bo_va = entry->bo_va;
1082         struct amdgpu_device *adev = entry->adev;
1083         struct amdgpu_vm *vm = bo_va->base.vm;
1084
1085         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1086
1087         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1088
1089         amdgpu_sync_fence(sync, bo_va->last_pt_update);
1090
1091         kfd_mem_dmaunmap_attachment(mem, entry);
1092 }
1093
1094 static int update_gpuvm_pte(struct kgd_mem *mem,
1095                             struct kfd_mem_attachment *entry,
1096                             struct amdgpu_sync *sync,
1097                             bool *table_freed)
1098 {
1099         struct amdgpu_bo_va *bo_va = entry->bo_va;
1100         struct amdgpu_device *adev = entry->adev;
1101         int ret;
1102
1103         ret = kfd_mem_dmamap_attachment(mem, entry);
1104         if (ret)
1105                 return ret;
1106
1107         /* Update the page tables  */
1108         ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1109         if (ret) {
1110                 pr_err("amdgpu_vm_bo_update failed\n");
1111                 return ret;
1112         }
1113
1114         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1115 }
1116
1117 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1118                            struct kfd_mem_attachment *entry,
1119                            struct amdgpu_sync *sync,
1120                            bool no_update_pte,
1121                            bool *table_freed)
1122 {
1123         int ret;
1124
1125         /* Set virtual address for the allocation */
1126         ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1127                                amdgpu_bo_size(entry->bo_va->base.bo),
1128                                entry->pte_flags);
1129         if (ret) {
1130                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1131                                 entry->va, ret);
1132                 return ret;
1133         }
1134
1135         if (no_update_pte)
1136                 return 0;
1137
1138         ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1139         if (ret) {
1140                 pr_err("update_gpuvm_pte() failed\n");
1141                 goto update_gpuvm_pte_failed;
1142         }
1143
1144         return 0;
1145
1146 update_gpuvm_pte_failed:
1147         unmap_bo_from_gpuvm(mem, entry, sync);
1148         return ret;
1149 }
1150
1151 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1152 {
1153         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1154
1155         if (!sg)
1156                 return NULL;
1157         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1158                 kfree(sg);
1159                 return NULL;
1160         }
1161         sg->sgl->dma_address = addr;
1162         sg->sgl->length = size;
1163 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1164         sg->sgl->dma_length = size;
1165 #endif
1166         return sg;
1167 }
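/*
 * Illustrative sketch (hypothetical helper): the single-entry sg table
 * built above carries a fixed device address (doorbell or MMIO page)
 * rather than struct pages. In the driver it is attached to an SG BO in
 * the doorbell/MMIO path of amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu();
 * the matching teardown is simply sg_free_table() plus kfree().
 */
static inline void kfd_doorbell_sg_example(uint64_t addr, uint32_t size)
{
        struct sg_table *sg = create_doorbell_sg(addr, size);

        if (!sg)
                return;

        /* ... hand sg to an SG BO created with ttm_bo_type_sg ... */

        sg_free_table(sg);
        kfree(sg);
}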
1168
1169 static int process_validate_vms(struct amdkfd_process_info *process_info)
1170 {
1171         struct amdgpu_vm *peer_vm;
1172         int ret;
1173
1174         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1175                             vm_list_node) {
1176                 ret = vm_validate_pt_pd_bos(peer_vm);
1177                 if (ret)
1178                         return ret;
1179         }
1180
1181         return 0;
1182 }
1183
1184 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1185                                  struct amdgpu_sync *sync)
1186 {
1187         struct amdgpu_vm *peer_vm;
1188         int ret;
1189
1190         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1191                             vm_list_node) {
1192                 struct amdgpu_bo *pd = peer_vm->root.bo;
1193
1194                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1195                                        AMDGPU_SYNC_NE_OWNER,
1196                                        AMDGPU_FENCE_OWNER_KFD);
1197                 if (ret)
1198                         return ret;
1199         }
1200
1201         return 0;
1202 }
1203
1204 static int process_update_pds(struct amdkfd_process_info *process_info,
1205                               struct amdgpu_sync *sync)
1206 {
1207         struct amdgpu_vm *peer_vm;
1208         int ret;
1209
1210         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1211                             vm_list_node) {
1212                 ret = vm_update_pds(peer_vm, sync);
1213                 if (ret)
1214                         return ret;
1215         }
1216
1217         return 0;
1218 }
1219
1220 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1221                        struct dma_fence **ef)
1222 {
1223         struct amdkfd_process_info *info = NULL;
1224         int ret;
1225
1226         if (!*process_info) {
1227                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1228                 if (!info)
1229                         return -ENOMEM;
1230
1231                 mutex_init(&info->lock);
1232                 INIT_LIST_HEAD(&info->vm_list_head);
1233                 INIT_LIST_HEAD(&info->kfd_bo_list);
1234                 INIT_LIST_HEAD(&info->userptr_valid_list);
1235                 INIT_LIST_HEAD(&info->userptr_inval_list);
1236
1237                 info->eviction_fence =
1238                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1239                                                    current->mm,
1240                                                    NULL);
1241                 if (!info->eviction_fence) {
1242                         pr_err("Failed to create eviction fence\n");
1243                         ret = -ENOMEM;
1244                         goto create_evict_fence_fail;
1245                 }
1246
1247                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1248                 atomic_set(&info->evicted_bos, 0);
1249                 INIT_DELAYED_WORK(&info->restore_userptr_work,
1250                                   amdgpu_amdkfd_restore_userptr_worker);
1251
1252                 *process_info = info;
1253                 *ef = dma_fence_get(&info->eviction_fence->base);
1254         }
1255
1256         vm->process_info = *process_info;
1257
1258         /* Validate page directory and attach eviction fence */
1259         ret = amdgpu_bo_reserve(vm->root.bo, true);
1260         if (ret)
1261                 goto reserve_pd_fail;
1262         ret = vm_validate_pt_pd_bos(vm);
1263         if (ret) {
1264                 pr_err("validate_pt_pd_bos() failed\n");
1265                 goto validate_pd_fail;
1266         }
1267         ret = amdgpu_bo_sync_wait(vm->root.bo,
1268                                   AMDGPU_FENCE_OWNER_KFD, false);
1269         if (ret)
1270                 goto wait_pd_fail;
1271         ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1272         if (ret)
1273                 goto reserve_shared_fail;
1274         amdgpu_bo_fence(vm->root.bo,
1275                         &vm->process_info->eviction_fence->base, true);
1276         amdgpu_bo_unreserve(vm->root.bo);
1277
1278         /* Update process info */
1279         mutex_lock(&vm->process_info->lock);
1280         list_add_tail(&vm->vm_list_node,
1281                         &(vm->process_info->vm_list_head));
1282         vm->process_info->n_vms++;
1283         mutex_unlock(&vm->process_info->lock);
1284
1285         return 0;
1286
1287 reserve_shared_fail:
1288 wait_pd_fail:
1289 validate_pd_fail:
1290         amdgpu_bo_unreserve(vm->root.bo);
1291 reserve_pd_fail:
1292         vm->process_info = NULL;
1293         if (info) {
1294                 /* Two fence references: one in info and one in *ef */
1295                 dma_fence_put(&info->eviction_fence->base);
1296                 dma_fence_put(*ef);
1297                 *ef = NULL;
1298                 *process_info = NULL;
1299                 put_pid(info->pid);
1300 create_evict_fence_fail:
1301                 mutex_destroy(&info->lock);
1302                 kfree(info);
1303         }
1304         return ret;
1305 }
1306
1307 /**
1308  * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1309  * @bo: Handle of buffer object being pinned
1310  * @domain: Domain into which BO should be pinned
1311  *
1312  *   - USERPTR BOs are UNPINNABLE and will return an error
1313  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1314  *     PIN count incremented. It is valid to PIN a BO multiple times
1315  *
1316  * Return: 0 if the BO was successfully pinned, non-zero otherwise.
1317  */
1318 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1319 {
1320         int ret = 0;
1321
1322         ret = amdgpu_bo_reserve(bo, false);
1323         if (unlikely(ret))
1324                 return ret;
1325
1326         ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1327         if (ret)
1328                 pr_err("Error pinning BO to domain %d\n", domain);
1329
1330         amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1331         amdgpu_bo_unreserve(bo);
1332
1333         return ret;
1334 }
1335
1336 /**
1337  * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1338  * @bo: Handle of buffer object being unpinned
1339  *
1340  *   - Is an illegal request for USERPTR BOs and is ignored
1341  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1342  *     PIN count decremented. Calls to UNPIN must balance calls to PIN
1343  */
1344 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1345 {
1346         int ret = 0;
1347
1348         ret = amdgpu_bo_reserve(bo, false);
1349         if (unlikely(ret))
1350                 return;
1351
1352         amdgpu_bo_unpin(bo);
1353         amdgpu_bo_unreserve(bo);
1354 }
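/*
 * Illustrative sketch (hypothetical caller): pin and unpin calls must be
 * balanced. A caller that needs a BO resident, e.g. for doorbell or MMIO
 * style use, pins it into GTT and unpins it again when done.
 */
static inline int kfd_pin_example(struct amdgpu_bo *bo)
{
        int ret;

        ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
        if (ret)
                return ret;

        /* ... BO is guaranteed resident here ... */

        amdgpu_amdkfd_gpuvm_unpin_bo(bo);
        return 0;
}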
1355
1356 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1357                                            struct file *filp, u32 pasid,
1358                                            void **process_info,
1359                                            struct dma_fence **ef)
1360 {
1361         struct amdgpu_fpriv *drv_priv;
1362         struct amdgpu_vm *avm;
1363         int ret;
1364
1365         ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1366         if (ret)
1367                 return ret;
1368         avm = &drv_priv->vm;
1369
1370         /* Already a compute VM? */
1371         if (avm->process_info)
1372                 return -EINVAL;
1373
1374         /* Free the original amdgpu-allocated PASID; it will be
1375          * replaced with a KFD-allocated PASID.
1376          */
1377         if (avm->pasid) {
1378                 amdgpu_pasid_free(avm->pasid);
1379                 amdgpu_vm_set_pasid(adev, avm, 0);
1380         }
1381
1382         /* Convert VM into a compute VM */
1383         ret = amdgpu_vm_make_compute(adev, avm);
1384         if (ret)
1385                 return ret;
1386
1387         ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1388         if (ret)
1389                 return ret;
1390         /* Initialize KFD part of the VM and process info */
1391         ret = init_kfd_vm(avm, process_info, ef);
1392         if (ret)
1393                 return ret;
1394
1395         amdgpu_vm_set_task_info(avm);
1396
1397         return 0;
1398 }
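/*
 * Illustrative sketch (hypothetical KFD-side caller, simplified): how a
 * compute VM is acquired from a DRM render-node file. In the real KFD
 * the eviction fence reference is kept for the process lifetime and the
 * VM is released again with amdgpu_amdkfd_gpuvm_release_process_vm().
 */
static inline int kfd_acquire_vm_example(struct amdgpu_device *adev,
                                         struct file *drm_file, u32 pasid)
{
        void *process_info = NULL;
        struct dma_fence *ef = NULL;
        int ret;

        ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(adev, drm_file, pasid,
                                                     &process_info, &ef);
        if (ret)
                return ret;

        /* ... user mode queues run against this VM ... */

        dma_fence_put(ef);
        return 0;
}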
1399
1400 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1401                                     struct amdgpu_vm *vm)
1402 {
1403         struct amdkfd_process_info *process_info = vm->process_info;
1404         struct amdgpu_bo *pd = vm->root.bo;
1405
1406         if (!process_info)
1407                 return;
1408
1409         /* Release eviction fence from PD */
1410         amdgpu_bo_reserve(pd, false);
1411         amdgpu_bo_fence(pd, NULL, false);
1412         amdgpu_bo_unreserve(pd);
1413
1414         /* Update process info */
1415         mutex_lock(&process_info->lock);
1416         process_info->n_vms--;
1417         list_del(&vm->vm_list_node);
1418         mutex_unlock(&process_info->lock);
1419
1420         vm->process_info = NULL;
1421
1422         /* Release per-process resources when last compute VM is destroyed */
1423         if (!process_info->n_vms) {
1424                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1425                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1426                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1427
1428                 dma_fence_put(&process_info->eviction_fence->base);
1429                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1430                 put_pid(process_info->pid);
1431                 mutex_destroy(&process_info->lock);
1432                 kfree(process_info);
1433         }
1434 }
1435
1436 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1437                                             void *drm_priv)
1438 {
1439         struct amdgpu_vm *avm;
1440
1441         if (WARN_ON(!adev || !drm_priv))
1442                 return;
1443
1444         avm = drm_priv_to_vm(drm_priv);
1445
1446         pr_debug("Releasing process vm %p\n", avm);
1447
1448         /* The original PASID of the amdgpu VM has already been
1449          * released when the VM was converted to a compute VM.
1450          * The current PASID is managed by KFD and will be
1451          * released on KFD process destruction. Set the amdgpu
1452          * PASID to 0 to avoid a duplicate release.
1453          */
1454         amdgpu_vm_release_compute(adev, avm);
1455 }
1456
1457 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1458 {
1459         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1460         struct amdgpu_bo *pd = avm->root.bo;
1461         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1462
1463         if (adev->asic_type < CHIP_VEGA10)
1464                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1465         return avm->pd_phys_addr;
1466 }
1467
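/* Stop processing MMU notifications for this process until the CRIU resume
 * ioctl re-enables them (see amdgpu_amdkfd_criu_resume below).
 */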
1468 void amdgpu_amdkfd_block_mmu_notifications(void *p)
1469 {
1470         struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1471
1472         mutex_lock(&pinfo->lock);
1473         WRITE_ONCE(pinfo->block_mmu_notifications, true);
1474         mutex_unlock(&pinfo->lock);
1475 }
1476
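/* Re-enable MMU notifications after a CRIU restore and kick the userptr
 * restore worker so that evicted userptr BOs get revalidated.
 */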
1477 int amdgpu_amdkfd_criu_resume(void *p)
1478 {
1479         int ret = 0;
1480         struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1481
1482         mutex_lock(&pinfo->lock);
1483         pr_debug("scheduling work\n");
1484         atomic_inc(&pinfo->evicted_bos);
1485         if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1486                 ret = -EINVAL;
1487                 goto out_unlock;
1488         }
1489         WRITE_ONCE(pinfo->block_mmu_notifications, false);
1490         schedule_delayed_work(&pinfo->restore_userptr_work, 0);
1491
1492 out_unlock:
1493         mutex_unlock(&pinfo->lock);
1494         return ret;
1495 }
1496
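/* Allocate a buffer object for a KFD process. Depending on the flags, the BO
 * is backed by VRAM, GTT, userptr pages or an SG table for doorbell/MMIO
 * pages, and it is added to the process's kfd_bo_list for restore handling.
 */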
1497 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1498                 struct amdgpu_device *adev, uint64_t va, uint64_t size,
1499                 void *drm_priv, struct kgd_mem **mem,
1500                 uint64_t *offset, uint32_t flags, bool criu_resume)
1501 {
1502         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1503         enum ttm_bo_type bo_type = ttm_bo_type_device;
1504         struct sg_table *sg = NULL;
1505         uint64_t user_addr = 0;
1506         struct amdgpu_bo *bo;
1507         struct drm_gem_object *gobj = NULL;
1508         u32 domain, alloc_domain;
1509         u64 alloc_flags;
1510         int ret;
1511
1512         /*
1513          * Check on which domain to allocate BO
1514          */
1515         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1516                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1517                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1518                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1519                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1520         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1521                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1522                 alloc_flags = 0;
1523         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1524                 domain = AMDGPU_GEM_DOMAIN_GTT;
1525                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1526                 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1527                 if (!offset || !*offset)
1528                         return -EINVAL;
1529                 user_addr = untagged_addr(*offset);
1530         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1531                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1532                 domain = AMDGPU_GEM_DOMAIN_GTT;
1533                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1534                 bo_type = ttm_bo_type_sg;
1535                 alloc_flags = 0;
1536                 if (size > UINT_MAX)
1537                         return -EINVAL;
1538                 sg = create_doorbell_sg(*offset, size);
1539                 if (!sg)
1540                         return -ENOMEM;
1541         } else {
1542                 return -EINVAL;
1543         }
1544
1545         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1546         if (!*mem) {
1547                 ret = -ENOMEM;
1548                 goto err;
1549         }
1550         INIT_LIST_HEAD(&(*mem)->attachments);
1551         mutex_init(&(*mem)->lock);
1552         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1553
1554         /* Workaround for AQL queue wraparound bug. Map the same
1555          * memory twice. That means we only actually allocate half
1556          * the memory.
1557          */
1558         if ((*mem)->aql_queue)
1559                 size = size >> 1;
1560
1561         (*mem)->alloc_flags = flags;
1562
1563         amdgpu_sync_create(&(*mem)->sync);
1564
1565         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
1566         if (ret) {
1567                 pr_debug("Insufficient memory\n");
1568                 goto err_reserve_limit;
1569         }
1570
1571         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1572                         va, size, domain_string(alloc_domain));
1573
1574         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1575                                        bo_type, NULL, &gobj);
1576         if (ret) {
1577                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1578                          domain_string(alloc_domain), ret);
1579                 goto err_bo_create;
1580         }
1581         ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1582         if (ret) {
1583                 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1584                 goto err_node_allow;
1585         }
1586         bo = gem_to_amdgpu_bo(gobj);
1587         if (bo_type == ttm_bo_type_sg) {
1588                 bo->tbo.sg = sg;
1589                 bo->tbo.ttm->sg = sg;
1590         }
1591         bo->kfd_bo = *mem;
1592         (*mem)->bo = bo;
1593         if (user_addr)
1594                 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1595
1596         (*mem)->va = va;
1597         (*mem)->domain = domain;
1598         (*mem)->mapped_to_gpu_memory = 0;
1599         (*mem)->process_info = avm->process_info;
1600         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1601
1602         if (user_addr) {
1603                 pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
1604                 ret = init_user_pages(*mem, user_addr, criu_resume);
1605                 if (ret)
1606                         goto allocate_init_user_pages_failed;
1607         } else  if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1608                                 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1609                 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1610                 if (ret) {
1611                         pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1612                         goto err_pin_bo;
1613                 }
1614                 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1615                 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1616         }
1617
1618         if (offset)
1619                 *offset = amdgpu_bo_mmap_offset(bo);
1620
1621         return 0;
1622
1623 allocate_init_user_pages_failed:
1624 err_pin_bo:
1625         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1626         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1627 err_node_allow:
1628         /* Don't unreserve system mem limit twice */
1629         goto err_reserve_limit;
1630 err_bo_create:
1631         unreserve_mem_limit(adev, size, flags);
1632 err_reserve_limit:
1633         mutex_destroy(&(*mem)->lock);
1634         if (gobj)
1635                 drm_gem_object_put(gobj);
1636         else
1637                 kfree(*mem);
1638 err:
1639         if (sg) {
1640                 sg_free_table(sg);
1641                 kfree(sg);
1642         }
1643         return ret;
1644 }
1645
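/* Free a buffer object that was allocated for a KFD process. Returns -EBUSY
 * if the BO is still mapped to one or more GPU VMs.
 */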
1646 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1647                 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1648                 uint64_t *size)
1649 {
1650         struct amdkfd_process_info *process_info = mem->process_info;
1651         unsigned long bo_size = mem->bo->tbo.base.size;
1652         struct kfd_mem_attachment *entry, *tmp;
1653         struct bo_vm_reservation_context ctx;
1654         struct ttm_validate_buffer *bo_list_entry;
1655         unsigned int mapped_to_gpu_memory;
1656         int ret;
1657         bool is_imported = false;
1658
1659         mutex_lock(&mem->lock);
1660
1661         /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
1662         if (mem->alloc_flags &
1663             (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1664              KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1665                 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1666         }
1667
1668         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1669         is_imported = mem->is_imported;
1670         mutex_unlock(&mem->lock);
1671         /* The lock is not needed after this point, since no one else
1672          * will use mem and it will be freed anyway.
1673          */
1674
1675         if (mapped_to_gpu_memory > 0) {
1676                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1677                                 mem->va, bo_size);
1678                 return -EBUSY;
1679         }
1680
1681         /* Make sure restore workers don't access the BO any more */
1682         bo_list_entry = &mem->validate_list;
1683         mutex_lock(&process_info->lock);
1684         list_del(&bo_list_entry->head);
1685         mutex_unlock(&process_info->lock);
1686
1687         /* No more MMU notifiers */
1688         amdgpu_mn_unregister(mem->bo);
1689
1690         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1691         if (unlikely(ret))
1692                 return ret;
1693
1694         /* The eviction fence should be removed by the last unmap.
1695          * TODO: Log an error condition if the bo still has the eviction fence
1696          * attached
1697          */
1698         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1699                                         process_info->eviction_fence);
1700         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1701                 mem->va + bo_size * (1 + mem->aql_queue));
1702
1703         /* Remove from VM internal data structures */
1704         list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1705                 kfd_mem_detach(entry);
1706
1707         ret = unreserve_bo_and_vms(&ctx, false, false);
1708
1709         /* Free the sync object */
1710         amdgpu_sync_free(&mem->sync);
1711
1712         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1713          * remap BO. We need to free it.
1714          */
1715         if (mem->bo->tbo.sg) {
1716                 sg_free_table(mem->bo->tbo.sg);
1717                 kfree(mem->bo->tbo.sg);
1718         }
1719
1720         /* Update the size of the BO being freed if it was allocated from
1721          * VRAM and is not imported.
1722          */
1723         if (size) {
1724                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1725                     (!is_imported))
1726                         *size = bo_size;
1727                 else
1728                         *size = 0;
1729         }
1730
1731         /* Free the BO */
1732         drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1733         if (mem->dmabuf)
1734                 dma_buf_put(mem->dmabuf);
1735         mutex_destroy(&mem->lock);
1736
1737         /* If this releases the last reference, it will end up calling
1738          * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
1739          * this needs to be the last call here.
1740          */
1741         drm_gem_object_put(&mem->bo->tbo.base);
1742
1743         return ret;
1744 }
1745
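/* Map a KFD buffer object into a GPU VM: attach the BO to the VM if needed,
 * validate it in its target domain and update the VM page tables.
 */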
1746 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1747                 struct amdgpu_device *adev, struct kgd_mem *mem,
1748                 void *drm_priv, bool *table_freed)
1749 {
1750         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1751         int ret;
1752         struct amdgpu_bo *bo;
1753         uint32_t domain;
1754         struct kfd_mem_attachment *entry;
1755         struct bo_vm_reservation_context ctx;
1756         unsigned long bo_size;
1757         bool is_invalid_userptr = false;
1758
1759         bo = mem->bo;
1760         if (!bo) {
1761                 pr_err("Invalid BO when mapping memory to GPU\n");
1762                 return -EINVAL;
1763         }
1764
1765         /* Make sure restore is not running concurrently. Since we
1766          * don't map invalid userptr BOs, we rely on the next restore
1767          * worker to do the mapping
1768          */
1769         mutex_lock(&mem->process_info->lock);
1770
1771         /* Take the mmap write lock. If we find an invalid userptr BO,
1772          * we can be sure that the MMU notifier is no longer running
1773          * concurrently and the queues are actually stopped.
1774          */
1775         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1776                 mmap_write_lock(current->mm);
1777                 is_invalid_userptr = atomic_read(&mem->invalid);
1778                 mmap_write_unlock(current->mm);
1779         }
1780
1781         mutex_lock(&mem->lock);
1782
1783         domain = mem->domain;
1784         bo_size = bo->tbo.base.size;
1785
1786         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1787                         mem->va,
1788                         mem->va + bo_size * (1 + mem->aql_queue),
1789                         avm, domain_string(domain));
1790
1791         if (!kfd_mem_is_attached(avm, mem)) {
1792                 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1793                 if (ret)
1794                         goto out;
1795         }
1796
1797         ret = reserve_bo_and_vm(mem, avm, &ctx);
1798         if (unlikely(ret))
1799                 goto out;
1800
1801         /* Userptr can be marked as "not invalid", but not actually be
1802          * validated yet (still in the system domain). In that case
1803          * the queues are still stopped and we can leave mapping for
1804          * the next restore worker
1805          */
1806         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1807             bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1808                 is_invalid_userptr = true;
1809
1810         ret = vm_validate_pt_pd_bos(avm);
1811         if (unlikely(ret))
1812                 goto out_unreserve;
1813
1814         if (mem->mapped_to_gpu_memory == 0 &&
1815             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1816                 /* Validate BO only once. The eviction fence gets added to BO
1817                  * the first time it is mapped. Validate will wait for all
1818                  * background evictions to complete.
1819                  */
1820                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1821                 if (ret) {
1822                         pr_debug("Validate failed\n");
1823                         goto out_unreserve;
1824                 }
1825         }
1826
1827         list_for_each_entry(entry, &mem->attachments, list) {
1828                 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1829                         continue;
1830
1831                 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1832                          entry->va, entry->va + bo_size, entry);
1833
1834                 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1835                                       is_invalid_userptr, table_freed);
1836                 if (ret) {
1837                         pr_err("Failed to map bo to gpuvm\n");
1838                         goto out_unreserve;
1839                 }
1840
1841                 ret = vm_update_pds(avm, ctx.sync);
1842                 if (ret) {
1843                         pr_err("Failed to update page directories\n");
1844                         goto out_unreserve;
1845                 }
1846
1847                 entry->is_mapped = true;
1848                 mem->mapped_to_gpu_memory++;
1849                 pr_debug("\t INC mapping count %d\n",
1850                          mem->mapped_to_gpu_memory);
1851         }
1852
1853         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1854                 amdgpu_bo_fence(bo,
1855                                 &avm->process_info->eviction_fence->base,
1856                                 true);
1857         ret = unreserve_bo_and_vms(&ctx, false, false);
1858
1859         goto out;
1860
1861 out_unreserve:
1862         unreserve_bo_and_vms(&ctx, false, false);
1863 out:
1864         mutex_unlock(&mem->process_info->lock);
1865         mutex_unlock(&mem->lock);
1866         return ret;
1867 }
1868
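/* Unmap a KFD buffer object from a GPU VM. Once the BO is no longer mapped
 * to any VM, its eviction fence is removed so it becomes evictable again.
 */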
1869 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1870                 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
1871 {
1872         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1873         struct amdkfd_process_info *process_info = avm->process_info;
1874         unsigned long bo_size = mem->bo->tbo.base.size;
1875         struct kfd_mem_attachment *entry;
1876         struct bo_vm_reservation_context ctx;
1877         int ret;
1878
1879         mutex_lock(&mem->lock);
1880
1881         ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1882         if (unlikely(ret))
1883                 goto out;
1884         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1885         if (ctx.n_vms == 0) {
1886                 ret = -EINVAL;
1887                 goto unreserve_out;
1888         }
1889
1890         ret = vm_validate_pt_pd_bos(avm);
1891         if (unlikely(ret))
1892                 goto unreserve_out;
1893
1894         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1895                 mem->va,
1896                 mem->va + bo_size * (1 + mem->aql_queue),
1897                 avm);
1898
1899         list_for_each_entry(entry, &mem->attachments, list) {
1900                 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1901                         continue;
1902
1903                 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1904                          entry->va, entry->va + bo_size, entry);
1905
1906                 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1907                 entry->is_mapped = false;
1908
1909                 mem->mapped_to_gpu_memory--;
1910                 pr_debug("\t DEC mapping count %d\n",
1911                          mem->mapped_to_gpu_memory);
1912         }
1913
1914         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1915          * required.
1916          */
1917         if (mem->mapped_to_gpu_memory == 0 &&
1918             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1919             !mem->bo->tbo.pin_count)
1920                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1921                                                 process_info->eviction_fence);
1922
1923 unreserve_out:
1924         unreserve_bo_and_vms(&ctx, false, false);
1925 out:
1926         mutex_unlock(&mem->lock);
1927         return ret;
1928 }
1929
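/* Wait for pending updates on the BO's sync object. A clone of the sync
 * object is waited on so the wait does not happen while holding the
 * kgd_mem lock.
 */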
1930 int amdgpu_amdkfd_gpuvm_sync_memory(
1931                 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
1932 {
1933         struct amdgpu_sync sync;
1934         int ret;
1935
1936         amdgpu_sync_create(&sync);
1937
1938         mutex_lock(&mem->lock);
1939         amdgpu_sync_clone(&mem->sync, &sync);
1940         mutex_unlock(&mem->lock);
1941
1942         ret = amdgpu_sync_wait(&sync, intr);
1943         amdgpu_sync_free(&sync);
1944         return ret;
1945 }
1946
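/* Pin a GTT BO and map it into the kernel address space. The BO is removed
 * from the restore worker's validate list and its KFD eviction fence is
 * removed. Userptr BOs are not supported.
 */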
1947 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev,
1948                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1949 {
1950         int ret;
1951         struct amdgpu_bo *bo = mem->bo;
1952
1953         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1954                 pr_err("userptr can't be mapped to kernel\n");
1955                 return -EINVAL;
1956         }
1957
1958         /* Remove kgd_mem from the kfd_bo_list so this BO is not
1959          * re-validated when BOs are restored after an eviction.
1960          */
1961         mutex_lock(&mem->process_info->lock);
1962
1963         ret = amdgpu_bo_reserve(bo, true);
1964         if (ret) {
1965                 pr_err("Failed to reserve bo. ret %d\n", ret);
1966                 goto bo_reserve_failed;
1967         }
1968
1969         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1970         if (ret) {
1971                 pr_err("Failed to pin bo. ret %d\n", ret);
1972                 goto pin_failed;
1973         }
1974
1975         ret = amdgpu_bo_kmap(bo, kptr);
1976         if (ret) {
1977                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1978                 goto kmap_failed;
1979         }
1980
1981         amdgpu_amdkfd_remove_eviction_fence(
1982                 bo, mem->process_info->eviction_fence);
1983         list_del_init(&mem->validate_list.head);
1984
1985         if (size)
1986                 *size = amdgpu_bo_size(bo);
1987
1988         amdgpu_bo_unreserve(bo);
1989
1990         mutex_unlock(&mem->process_info->lock);
1991         return 0;
1992
1993 kmap_failed:
1994         amdgpu_bo_unpin(bo);
1995 pin_failed:
1996         amdgpu_bo_unreserve(bo);
1997 bo_reserve_failed:
1998         mutex_unlock(&mem->process_info->lock);
1999
2000         return ret;
2001 }
2002
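/* Undo amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel: drop the kernel mapping and
 * unpin the BO.
 */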
2003 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev,
2004                                                   struct kgd_mem *mem)
2005 {
2006         struct amdgpu_bo *bo = mem->bo;
2007
2008         amdgpu_bo_reserve(bo, true);
2009         amdgpu_bo_kunmap(bo);
2010         amdgpu_bo_unpin(bo);
2011         amdgpu_bo_unreserve(bo);
2012 }
2013
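/* Copy the most recent VM fault information to the caller and clear the
 * updated flag so the same fault is not reported twice.
 */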
2014 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2015                                           struct kfd_vm_fault_info *mem)
2016 {
2017         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2018                 *mem = *adev->gmc.vm_fault_info;
2019                 mb();
2020                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2021         }
2022         return 0;
2023 }
2024
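/* Import a dma-buf exported by amdgpu and wrap it in a kgd_mem so that KFD
 * can map it. Only VRAM and GTT BOs from the same device are supported.
 */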
2025 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
2026                                       struct dma_buf *dma_buf,
2027                                       uint64_t va, void *drm_priv,
2028                                       struct kgd_mem **mem, uint64_t *size,
2029                                       uint64_t *mmap_offset)
2030 {
2031         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2032         struct drm_gem_object *obj;
2033         struct amdgpu_bo *bo;
2034         int ret;
2035
2036         if (dma_buf->ops != &amdgpu_dmabuf_ops)
2037                 /* Can't handle non-graphics buffers */
2038                 return -EINVAL;
2039
2040         obj = dma_buf->priv;
2041         if (drm_to_adev(obj->dev) != adev)
2042                 /* Can't handle buffers from other devices */
2043                 return -EINVAL;
2044
2045         bo = gem_to_amdgpu_bo(obj);
2046         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2047                                     AMDGPU_GEM_DOMAIN_GTT)))
2048                 /* Only VRAM and GTT BOs are supported */
2049                 return -EINVAL;
2050
2051         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2052         if (!*mem)
2053                 return -ENOMEM;
2054
2055         ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2056         if (ret) {
2057                 kfree(*mem);
2058                 return ret;
2059         }
2060
2061         if (size)
2062                 *size = amdgpu_bo_size(bo);
2063
2064         if (mmap_offset)
2065                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
2066
2067         INIT_LIST_HEAD(&(*mem)->attachments);
2068         mutex_init(&(*mem)->lock);
2069
2070         (*mem)->alloc_flags =
2071                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2072                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2073                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2074                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2075
2076         drm_gem_object_get(&bo->tbo.base);
2077         (*mem)->bo = bo;
2078         (*mem)->va = va;
2079         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2080                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2081         (*mem)->mapped_to_gpu_memory = 0;
2082         (*mem)->process_info = avm->process_info;
2083         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2084         amdgpu_sync_create(&(*mem)->sync);
2085         (*mem)->is_imported = true;
2086
2087         return 0;
2088 }
2089
2090 /* Evict a userptr BO by stopping the queues if necessary
2091  *
2092  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2093  * cannot do any memory allocations, and cannot take any locks that
2094  * are held elsewhere while allocating memory. Therefore this is as
2095  * simple as possible, using atomic counters.
2096  *
2097  * It doesn't do anything to the BO itself. The real work happens in
2098  * restore, where we get updated page addresses. This function only
2099  * ensures that GPU access to the BO is stopped.
2100  */
2101 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
2102                                 struct mm_struct *mm)
2103 {
2104         struct amdkfd_process_info *process_info = mem->process_info;
2105         int evicted_bos;
2106         int r = 0;
2107
2108         /* Do not process MMU notifications until stage-4 IOCTL is received */
2109         if (READ_ONCE(process_info->block_mmu_notifications))
2110                 return 0;
2111
2112         atomic_inc(&mem->invalid);
2113         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
2114         if (evicted_bos == 1) {
2115                 /* First eviction, stop the queues */
2116                 r = kgd2kfd_quiesce_mm(mm);
2117                 if (r)
2118                         pr_err("Failed to quiesce KFD\n");
2119                 schedule_delayed_work(&process_info->restore_userptr_work,
2120                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2121         }
2122
2123         return r;
2124 }
2125
2126 /* Update invalid userptr BOs
2127  *
2128  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2129  * userptr_inval_list and updates user pages for all BOs that have
2130  * been invalidated since their last update.
2131  */
2132 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2133                                      struct mm_struct *mm)
2134 {
2135         struct kgd_mem *mem, *tmp_mem;
2136         struct amdgpu_bo *bo;
2137         struct ttm_operation_ctx ctx = { false, false };
2138         int invalid, ret;
2139
2140         /* Move all invalidated BOs to the userptr_inval_list and
2141          * release their user pages by migration to the CPU domain
2142          */
2143         list_for_each_entry_safe(mem, tmp_mem,
2144                                  &process_info->userptr_valid_list,
2145                                  validate_list.head) {
2146                 if (!atomic_read(&mem->invalid))
2147                         continue; /* BO is still valid */
2148
2149                 bo = mem->bo;
2150
2151                 if (amdgpu_bo_reserve(bo, true))
2152                         return -EAGAIN;
2153                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2154                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2155                 amdgpu_bo_unreserve(bo);
2156                 if (ret) {
2157                         pr_err("%s: Failed to invalidate userptr BO\n",
2158                                __func__);
2159                         return -EAGAIN;
2160                 }
2161
2162                 list_move_tail(&mem->validate_list.head,
2163                                &process_info->userptr_inval_list);
2164         }
2165
2166         if (list_empty(&process_info->userptr_inval_list))
2167                 return 0; /* All evicted userptr BOs were freed */
2168
2169         /* Go through userptr_inval_list and update any invalid user_pages */
2170         list_for_each_entry(mem, &process_info->userptr_inval_list,
2171                             validate_list.head) {
2172                 invalid = atomic_read(&mem->invalid);
2173                 if (!invalid)
2174                         /* BO hasn't been invalidated since the last
2175                          * revalidation attempt. Keep its place in the list.
2176                          */
2177                         continue;
2178
2179                 bo = mem->bo;
2180
2181                 /* Get updated user pages */
2182                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2183                 if (ret) {
2184                         pr_debug("Failed %d to get user pages\n", ret);
2185
2186                         /* Treat the -EFAULT (bad address) error as success. The
2187                          * access will fail later with a VM fault if the GPU tries
2188                          * to use the memory, which is better than hanging
2189                          * indefinitely with stalled user mode queues.
2190                          *
2191                          * Return other errors (-EBUSY or -ENOMEM) to retry restore
2192                          */
2193                         if (ret != -EFAULT)
2194                                 return ret;
2195                 } else {
2196
2197                         /*
2198                          * FIXME: Cannot ignore the return code, must hold
2199                          * notifier_lock
2200                          */
2201                         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2202                 }
2203
2204                 /* Mark the BO as valid unless it was invalidated
2205                  * again concurrently.
2206                  */
2207                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2208                         return -EAGAIN;
2209         }
2210
2211         return 0;
2212 }
2213
2214 /* Validate invalid userptr BOs
2215  *
2216  * Validates BOs on the userptr_inval_list, and moves them back to the
2217  * userptr_valid_list. Also updates GPUVM page tables with new page
2218  * addresses and waits for the page table updates to complete.
2219  */
2220 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2221 {
2222         struct amdgpu_bo_list_entry *pd_bo_list_entries;
2223         struct list_head resv_list, duplicates;
2224         struct ww_acquire_ctx ticket;
2225         struct amdgpu_sync sync;
2226
2227         struct amdgpu_vm *peer_vm;
2228         struct kgd_mem *mem, *tmp_mem;
2229         struct amdgpu_bo *bo;
2230         struct ttm_operation_ctx ctx = { false, false };
2231         int i, ret;
2232
2233         pd_bo_list_entries = kcalloc(process_info->n_vms,
2234                                      sizeof(struct amdgpu_bo_list_entry),
2235                                      GFP_KERNEL);
2236         if (!pd_bo_list_entries) {
2237                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2238                 ret = -ENOMEM;
2239                 goto out_no_mem;
2240         }
2241
2242         INIT_LIST_HEAD(&resv_list);
2243         INIT_LIST_HEAD(&duplicates);
2244
2245         /* Get all the page directory BOs that need to be reserved */
2246         i = 0;
2247         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2248                             vm_list_node)
2249                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2250                                     &pd_bo_list_entries[i++]);
2251         /* Add the userptr_inval_list entries to resv_list */
2252         list_for_each_entry(mem, &process_info->userptr_inval_list,
2253                             validate_list.head) {
2254                 list_add_tail(&mem->resv_list.head, &resv_list);
2255                 mem->resv_list.bo = mem->validate_list.bo;
2256                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2257         }
2258
2259         /* Reserve all BOs and page tables for validation */
2260         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2261         WARN(!list_empty(&duplicates), "Duplicates should be empty");
2262         if (ret)
2263                 goto out_free;
2264
2265         amdgpu_sync_create(&sync);
2266
2267         ret = process_validate_vms(process_info);
2268         if (ret)
2269                 goto unreserve_out;
2270
2271         /* Validate BOs and update GPUVM page tables */
2272         list_for_each_entry_safe(mem, tmp_mem,
2273                                  &process_info->userptr_inval_list,
2274                                  validate_list.head) {
2275                 struct kfd_mem_attachment *attachment;
2276
2277                 bo = mem->bo;
2278
2279                 /* Validate the BO if we got user pages */
2280                 if (bo->tbo.ttm->pages[0]) {
2281                         amdgpu_bo_placement_from_domain(bo, mem->domain);
2282                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2283                         if (ret) {
2284                                 pr_err("%s: failed to validate BO\n", __func__);
2285                                 goto unreserve_out;
2286                         }
2287                 }
2288
2289                 list_move_tail(&mem->validate_list.head,
2290                                &process_info->userptr_valid_list);
2291
2292                 /* Update mapping. If the BO was not validated
2293                  * (because we couldn't get user pages), this will
2294                  * clear the page table entries, which will result in
2295                  * VM faults if the GPU tries to access the invalid
2296                  * memory.
2297                  */
2298                 list_for_each_entry(attachment, &mem->attachments, list) {
2299                         if (!attachment->is_mapped)
2300                                 continue;
2301
2302                         kfd_mem_dmaunmap_attachment(mem, attachment);
2303                         ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2304                         if (ret) {
2305                                 pr_err("%s: update PTE failed\n", __func__);
2306                                 /* make sure this gets validated again */
2307                                 atomic_inc(&mem->invalid);
2308                                 goto unreserve_out;
2309                         }
2310                 }
2311         }
2312
2313         /* Update page directories */
2314         ret = process_update_pds(process_info, &sync);
2315
2316 unreserve_out:
2317         ttm_eu_backoff_reservation(&ticket, &resv_list);
2318         amdgpu_sync_wait(&sync, false);
2319         amdgpu_sync_free(&sync);
2320 out_free:
2321         kfree(pd_bo_list_entries);
2322 out_no_mem:
2323
2324         return ret;
2325 }
2326
2327 /* Worker callback to restore evicted userptr BOs
2328  *
2329  * Tries to update and validate all userptr BOs. If successful and no
2330  * concurrent evictions happened, the queues are restarted. Otherwise,
2331  * reschedule for another attempt later.
2332  */
2333 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2334 {
2335         struct delayed_work *dwork = to_delayed_work(work);
2336         struct amdkfd_process_info *process_info =
2337                 container_of(dwork, struct amdkfd_process_info,
2338                              restore_userptr_work);
2339         struct task_struct *usertask;
2340         struct mm_struct *mm;
2341         int evicted_bos;
2342
2343         evicted_bos = atomic_read(&process_info->evicted_bos);
2344         if (!evicted_bos)
2345                 return;
2346
2347         /* Reference task and mm in case of concurrent process termination */
2348         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2349         if (!usertask)
2350                 return;
2351         mm = get_task_mm(usertask);
2352         if (!mm) {
2353                 put_task_struct(usertask);
2354                 return;
2355         }
2356
2357         mutex_lock(&process_info->lock);
2358
2359         if (update_invalid_user_pages(process_info, mm))
2360                 goto unlock_out;
2361         /* userptr_inval_list can be empty if all evicted userptr BOs
2362          * have been freed. In that case there is nothing to validate
2363          * and we can just restart the queues.
2364          */
2365         if (!list_empty(&process_info->userptr_inval_list)) {
2366                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2367                         goto unlock_out; /* Concurrent eviction, try again */
2368
2369                 if (validate_invalid_user_pages(process_info))
2370                         goto unlock_out;
2371         }
2372         /* Final check for a concurrent eviction and atomic update. If
2373          * another eviction happens after the successful update, it will
2374          * be a first eviction that calls quiesce_mm. The eviction
2375          * reference counting inside KFD will handle this case.
2376          */
2377         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2378             evicted_bos)
2379                 goto unlock_out;
2380         evicted_bos = 0;
2381         if (kgd2kfd_resume_mm(mm)) {
2382                 pr_err("%s: Failed to resume KFD\n", __func__);
2383                 /* No recovery from this failure. Probably the CP is
2384                  * hanging. No point trying again.
2385                  */
2386         }
2387
2388 unlock_out:
2389         mutex_unlock(&process_info->lock);
2390         mmput(mm);
2391         put_task_struct(usertask);
2392
2393         /* If validation failed, reschedule another attempt */
2394         if (evicted_bos)
2395                 schedule_delayed_work(&process_info->restore_userptr_work,
2396                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2397 }
2398
2399 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2400  *   KFD process identified by process_info
2401  *
2402  * @process_info: amdkfd_process_info of the KFD process
2403  *
2404  * After memory eviction, the restore thread calls this function. The function
2405  * should be called while the process is still valid. BO restore involves:
2406  *
2407  * 1.  Release the old eviction fence and create a new one
2408  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2409  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2410  *     BOs that need to be reserved.
2411  * 4.  Reserve all the BOs
2412  * 5.  Validate the PD and PT BOs.
2413  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
2414  * 7.  Add the fence to all PD and PT BOs.
2415  * 8.  Unreserve all BOs
2416  */
2417 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2418 {
2419         struct amdgpu_bo_list_entry *pd_bo_list;
2420         struct amdkfd_process_info *process_info = info;
2421         struct amdgpu_vm *peer_vm;
2422         struct kgd_mem *mem;
2423         struct bo_vm_reservation_context ctx;
2424         struct amdgpu_amdkfd_fence *new_fence;
2425         int ret = 0, i;
2426         struct list_head duplicate_save;
2427         struct amdgpu_sync sync_obj;
2428         unsigned long failed_size = 0;
2429         unsigned long total_size = 0;
2430
2431         INIT_LIST_HEAD(&duplicate_save);
2432         INIT_LIST_HEAD(&ctx.list);
2433         INIT_LIST_HEAD(&ctx.duplicates);
2434
2435         pd_bo_list = kcalloc(process_info->n_vms,
2436                              sizeof(struct amdgpu_bo_list_entry),
2437                              GFP_KERNEL);
2438         if (!pd_bo_list)
2439                 return -ENOMEM;
2440
2441         i = 0;
2442         mutex_lock(&process_info->lock);
2443         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2444                         vm_list_node)
2445                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2446
2447         /* Reserve all BOs and page tables/directory. Add all BOs from
2448          * kfd_bo_list to ctx.list
2449          */
2450         list_for_each_entry(mem, &process_info->kfd_bo_list,
2451                             validate_list.head) {
2452
2453                 list_add_tail(&mem->resv_list.head, &ctx.list);
2454                 mem->resv_list.bo = mem->validate_list.bo;
2455                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2456         }
2457
2458         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2459                                      false, &duplicate_save);
2460         if (ret) {
2461                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2462                 goto ttm_reserve_fail;
2463         }
2464
2465         amdgpu_sync_create(&sync_obj);
2466
2467         /* Validate PDs and PTs */
2468         ret = process_validate_vms(process_info);
2469         if (ret)
2470                 goto validate_map_fail;
2471
2472         ret = process_sync_pds_resv(process_info, &sync_obj);
2473         if (ret) {
2474                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2475                 goto validate_map_fail;
2476         }
2477
2478         /* Validate BOs and map them to GPUVM (update VM page tables). */
2479         list_for_each_entry(mem, &process_info->kfd_bo_list,
2480                             validate_list.head) {
2481
2482                 struct amdgpu_bo *bo = mem->bo;
2483                 uint32_t domain = mem->domain;
2484                 struct kfd_mem_attachment *attachment;
2485
2486                 total_size += amdgpu_bo_size(bo);
2487
2488                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2489                 if (ret) {
2490                         pr_debug("Memory eviction: Validate BOs failed\n");
2491                         failed_size += amdgpu_bo_size(bo);
2492                         ret = amdgpu_amdkfd_bo_validate(bo,
2493                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2494                         if (ret) {
2495                                 pr_debug("Memory eviction: Try again\n");
2496                                 goto validate_map_fail;
2497                         }
2498                 }
2499                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2500                 if (ret) {
2501                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2502                         goto validate_map_fail;
2503                 }
2504                 list_for_each_entry(attachment, &mem->attachments, list) {
2505                         if (!attachment->is_mapped)
2506                                 continue;
2507
2508                         kfd_mem_dmaunmap_attachment(mem, attachment);
2509                         ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2510                         if (ret) {
2511                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2512                                 goto validate_map_fail;
2513                         }
2514                 }
2515         }
2516
2517         if (failed_size)
2518                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2519
2520         /* Update page directories */
2521         ret = process_update_pds(process_info, &sync_obj);
2522         if (ret) {
2523                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2524                 goto validate_map_fail;
2525         }
2526
2527         /* Wait for validate and PT updates to finish */
2528         amdgpu_sync_wait(&sync_obj, false);
2529
2530         /* Release the old eviction fence and create a new one, because a
2531          * fence only goes from unsignaled to signaled and cannot be reused.
2532          * Use the context and mm from the old fence.
2533          */
2534         new_fence = amdgpu_amdkfd_fence_create(
2535                                 process_info->eviction_fence->base.context,
2536                                 process_info->eviction_fence->mm,
2537                                 NULL);
2538         if (!new_fence) {
2539                 pr_err("Failed to create eviction fence\n");
2540                 ret = -ENOMEM;
2541                 goto validate_map_fail;
2542         }
2543         dma_fence_put(&process_info->eviction_fence->base);
2544         process_info->eviction_fence = new_fence;
2545         *ef = dma_fence_get(&new_fence->base);
2546
2547         /* Attach new eviction fence to all BOs */
2548         list_for_each_entry(mem, &process_info->kfd_bo_list,
2549                 validate_list.head)
2550                 amdgpu_bo_fence(mem->bo,
2551                         &process_info->eviction_fence->base, true);
2552
2553         /* Attach eviction fence to PD / PT BOs */
2554         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2555                             vm_list_node) {
2556                 struct amdgpu_bo *bo = peer_vm->root.bo;
2557
2558                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2559         }
2560
2561 validate_map_fail:
2562         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2563         amdgpu_sync_free(&sync_obj);
2564 ttm_reserve_fail:
2565         mutex_unlock(&process_info->lock);
2566         kfree(pd_bo_list);
2567         return ret;
2568 }
2569
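/* Add the GWS BO to a KFD process: wrap it in a kgd_mem, validate it in the
 * GWS domain and attach the process eviction fence.
 */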
2570 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2571 {
2572         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2573         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2574         int ret;
2575
2576         if (!info || !gws)
2577                 return -EINVAL;
2578
2579         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2580         if (!*mem)
2581                 return -ENOMEM;
2582
2583         mutex_init(&(*mem)->lock);
2584         INIT_LIST_HEAD(&(*mem)->attachments);
2585         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2586         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2587         (*mem)->process_info = process_info;
2588         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2589         amdgpu_sync_create(&(*mem)->sync);
2590
2591
2592         /* Validate gws bo the first time it is added to process */
2593         mutex_lock(&(*mem)->process_info->lock);
2594         ret = amdgpu_bo_reserve(gws_bo, false);
2595         if (unlikely(ret)) {
2596                 pr_err("Reserve gws bo failed %d\n", ret);
2597                 goto bo_reservation_failure;
2598         }
2599
2600         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2601         if (ret) {
2602                 pr_err("GWS BO validate failed %d\n", ret);
2603                 goto bo_validation_failure;
2604         }
2605         /* The GWS resource is shared between amdgpu and amdkfd.
2606          * Add the process eviction fence to the bo so they can
2607          * evict each other.
2608          */
2609         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2610         if (ret)
2611                 goto reserve_shared_fail;
2612         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2613         amdgpu_bo_unreserve(gws_bo);
2614         mutex_unlock(&(*mem)->process_info->lock);
2615
2616         return ret;
2617
2618 reserve_shared_fail:
2619 bo_validation_failure:
2620         amdgpu_bo_unreserve(gws_bo);
2621 bo_reservation_failure:
2622         mutex_unlock(&(*mem)->process_info->lock);
2623         amdgpu_sync_free(&(*mem)->sync);
2624         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2625         amdgpu_bo_unref(&gws_bo);
2626         mutex_destroy(&(*mem)->lock);
2627         kfree(*mem);
2628         *mem = NULL;
2629         return ret;
2630 }
2631
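/* Remove the GWS BO from a KFD process and drop the references taken in
 * amdgpu_amdkfd_add_gws_to_process.
 */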
2632 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2633 {
2634         int ret;
2635         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2636         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2637         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2638
2639         /* Remove BO from process's validate list so restore worker won't touch
2640          * it anymore
2641          */
2642         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2643
2644         ret = amdgpu_bo_reserve(gws_bo, false);
2645         if (unlikely(ret)) {
2646                 pr_err("Reserve gws bo failed %d\n", ret);
2647                 /* TODO: add BO back to validate_list? */
2648                 return ret;
2649         }
2650         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2651                         process_info->eviction_fence);
2652         amdgpu_bo_unreserve(gws_bo);
2653         amdgpu_sync_free(&kgd_mem->sync);
2654         amdgpu_bo_unref(&gws_bo);
2655         mutex_destroy(&kgd_mem->lock);
2656         kfree(mem);
2657         return 0;
2658 }
2659
2660 /* Returns GPU-specific tiling mode information */
2661 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
2662                                 struct tile_config *config)
2663 {
2664         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2665         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2666         config->num_tile_configs =
2667                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2668         config->macro_tile_config_ptr =
2669                         adev->gfx.config.macrotile_mode_array;
2670         config->num_macro_tile_configs =
2671                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2672
2673         /* Those values are not set from GFX9 onwards */
2674         config->num_banks = adev->gfx.config.num_banks;
2675         config->num_ranks = adev->gfx.config.num_ranks;
2676
2677         return 0;
2678 }
2679
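/* Return true if the BO is currently mapped on the given device */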
2680 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
2681 {
2682         struct kfd_mem_attachment *entry;
2683
2684         list_for_each_entry(entry, &mem->attachments, list) {
2685                 if (entry->is_mapped && entry->adev == adev)
2686                         return true;
2687         }
2688         return false;
2689 }