drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_vm.h"
30 #include "amdgpu_amdkfd.h"
31 #include "amdgpu_dma_buf.h"
32 #include <uapi/linux/kfd_ioctl.h>
33
34 /* BO flag to indicate a KFD userptr BO */
35 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
36
37 /* Userptr restore delay, just long enough to allow consecutive VM
38  * changes to accumulate
39  */
40 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
41
42 /* Impose limit on how much memory KFD can use */
43 static struct {
44         uint64_t max_system_mem_limit;
45         uint64_t max_ttm_mem_limit;
46         int64_t system_mem_used;
47         int64_t ttm_mem_used;
48         spinlock_t mem_limit_lock;
49 } kfd_mem_limit;
50
51 /* Struct used for amdgpu_amdkfd_bo_validate */
52 struct amdgpu_vm_parser {
53         uint32_t        domain;
54         bool            wait;
55 };
56
57 static const char * const domain_bit_to_string[] = {
58                 "CPU",
59                 "GTT",
60                 "VRAM",
61                 "GDS",
62                 "GWS",
63                 "OA"
64 };
65
66 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
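/*
 * Illustrative example: the domain values (from the amdgpu uapi) are single
 * bits, e.g. AMDGPU_GEM_DOMAIN_VRAM == 0x4, so ffs(0x4) - 1 == 2 and
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) yields "VRAM". If several bits are
 * set, only the lowest one is reported.
 */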
67
68 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
69
70
71 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
72 {
73         return (struct amdgpu_device *)kgd;
74 }
75
76 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
77                 struct kgd_mem *mem)
78 {
79         struct kfd_bo_va_list *entry;
80
81         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
82                 if (entry->bo_va->base.vm == avm)
83                         return false;
84
85         return true;
86 }
87
88 /* Set memory usage limits. Currently, limits are
89  *  System (TTM + userptr) memory - 15/16th System RAM
90  *  TTM memory - 3/8th System RAM
91  */
92 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
93 {
94         struct sysinfo si;
95         uint64_t mem;
96
97         si_meminfo(&si);
98         mem = si.totalram - si.totalhigh;
99         mem *= si.mem_unit;
100
101         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
102         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
103         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
104         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
105                 (kfd_mem_limit.max_system_mem_limit >> 20),
106                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
107 }
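/*
 * Worked example: on a machine with 64 GB of usable system RAM the limits
 * above come out to
 *   max_system_mem_limit = 64 GB - 64 GB/16 = 60 GB   (15/16 of RAM)
 *   max_ttm_mem_limit    = 64 GB/2 - 64 GB/8 = 24 GB  (3/8 of RAM)
 */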
108
109 /* Estimate page table size needed to represent a given memory size
110  *
111  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
112  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
113  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
114  * for 2MB pages for TLB efficiency. However, small allocations and
115  * fragmented system memory still need some 4KB pages. We choose a
116  * compromise that should work in most cases without reserving too
117  * much memory for page tables unnecessarily (factor 16K, >> 14).
118  */
119 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
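/*
 * Worked example: for 16 GB of managed memory ESTIMATE_PT_SIZE reserves
 * 16 GB >> 14 = 1 MB for page tables. Pure 4 KB mappings would need 32 MB
 * of PTEs (>> 9), pure 2 MB mappings only 64 KB (>> 18).
 */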
120
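/*
 * Accounting rules implemented below, per allocation type (acc_size is the
 * TTM bookkeeping overhead for the BO):
 *
 *   GTT BO:            system += size + acc_size, TTM += size + acc_size
 *   userptr BO:        system += size + acc_size, TTM += acc_size
 *   VRAM BO:           system += acc_size, TTM += acc_size, VRAM += size
 *   doorbell/MMIO SG:  system += acc_size, TTM += acc_size
 */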
121 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
122                 uint64_t size, u32 domain, bool sg)
123 {
124         uint64_t reserved_for_pt =
125                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
126         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
127         int ret = 0;
128
129         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
130                                        sizeof(struct amdgpu_bo));
131
132         vram_needed = 0;
133         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
134                 /* TTM GTT memory */
135                 system_mem_needed = acc_size + size;
136                 ttm_mem_needed = acc_size + size;
137         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
138                 /* Userptr */
139                 system_mem_needed = acc_size + size;
140                 ttm_mem_needed = acc_size;
141         } else {
142                 /* VRAM and SG */
143                 system_mem_needed = acc_size;
144                 ttm_mem_needed = acc_size;
145                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
146                         vram_needed = size;
147         }
148
149         spin_lock(&kfd_mem_limit.mem_limit_lock);
150
151         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
152              kfd_mem_limit.max_system_mem_limit) ||
153             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
154              kfd_mem_limit.max_ttm_mem_limit) ||
155             (adev->kfd.vram_used + vram_needed >
156              adev->gmc.real_vram_size - reserved_for_pt)) {
157                 ret = -ENOMEM;
158         } else {
159                 kfd_mem_limit.system_mem_used += system_mem_needed;
160                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
161                 adev->kfd.vram_used += vram_needed;
162         }
163
164         spin_unlock(&kfd_mem_limit.mem_limit_lock);
165         return ret;
166 }
167
168 static void unreserve_mem_limit(struct amdgpu_device *adev,
169                 uint64_t size, u32 domain, bool sg)
170 {
171         size_t acc_size;
172
173         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
174                                        sizeof(struct amdgpu_bo));
175
176         spin_lock(&kfd_mem_limit.mem_limit_lock);
177         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
178                 kfd_mem_limit.system_mem_used -= (acc_size + size);
179                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
180         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
181                 kfd_mem_limit.system_mem_used -= (acc_size + size);
182                 kfd_mem_limit.ttm_mem_used -= acc_size;
183         } else {
184                 kfd_mem_limit.system_mem_used -= acc_size;
185                 kfd_mem_limit.ttm_mem_used -= acc_size;
186                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
187                         adev->kfd.vram_used -= size;
188                         WARN_ONCE(adev->kfd.vram_used < 0,
189                                   "kfd VRAM memory accounting unbalanced");
190                 }
191         }
192         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
193                   "kfd system memory accounting unbalanced");
194         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
195                   "kfd TTM memory accounting unbalanced");
196
197         spin_unlock(&kfd_mem_limit.mem_limit_lock);
198 }
199
200 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
201 {
202         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
203         u32 domain = bo->preferred_domains;
204         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
205
206         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
207                 domain = AMDGPU_GEM_DOMAIN_CPU;
208                 sg = false;
209         }
210
211         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
212 }
213
214
215 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
216  *  reservation object.
217  *
218  * @bo: [IN] Remove eviction fence(s) from this BO
219  * @ef: [IN] This eviction fence is removed if it
220  *  is present in the shared list.
221  *
222  * NOTE: Must be called with BO reserved, i.e. with bo->tbo.base.resv locked.
223  */
224 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
225                                         struct amdgpu_amdkfd_fence *ef)
226 {
227         struct dma_resv *resv = bo->tbo.base.resv;
228         struct dma_resv_list *old, *new;
229         unsigned int i, j, k;
230
231         if (!ef)
232                 return -EINVAL;
233
234         old = dma_resv_get_list(resv);
235         if (!old)
236                 return 0;
237
238         new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
239                       GFP_KERNEL);
240         if (!new)
241                 return -ENOMEM;
242
243         /* Go through all the shared fences in the reservation object and sort
244          * the interesting ones to the end of the list.
245          */
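        /* For example, with shared fences [A, ef1, B, ef2], where ef1 and
         * ef2 share ef's context, the loop below produces
         * new->shared = [A, B, ef2, ef1] with shared_count = 2: the
         * eviction fences drop out of the visible part of the list and
         * their references are released further down.
         */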
246         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
247                 struct dma_fence *f;
248
249                 f = rcu_dereference_protected(old->shared[i],
250                                               dma_resv_held(resv));
251
252                 if (f->context == ef->base.context)
253                         RCU_INIT_POINTER(new->shared[--j], f);
254                 else
255                         RCU_INIT_POINTER(new->shared[k++], f);
256         }
257         new->shared_max = old->shared_max;
258         new->shared_count = k;
259
260         /* Install the new fence list, seqcount provides the barriers */
261         preempt_disable();
262         write_seqcount_begin(&resv->seq);
263         RCU_INIT_POINTER(resv->fence, new);
264         write_seqcount_end(&resv->seq);
265         preempt_enable();
266
267         /* Drop the references to the removed fences */
268         for (i = j, k = 0; i < old->shared_count; ++i) {
269                 struct dma_fence *f;
270
271                 f = rcu_dereference_protected(new->shared[i],
272                                               dma_resv_held(resv));
273                 dma_fence_put(f);
274         }
275         kfree_rcu(old, rcu);
276
277         return 0;
278 }
279
280 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
281 {
282         struct amdgpu_bo *root = bo;
283         struct amdgpu_vm_bo_base *vm_bo;
284         struct amdgpu_vm *vm;
285         struct amdkfd_process_info *info;
286         struct amdgpu_amdkfd_fence *ef;
287         int ret;
288
289         /* We can always get vm_bo from the root PD BO. */
290         while (root->parent)
291                 root = root->parent;
292
293         vm_bo = root->vm_bo;
294         if (!vm_bo)
295                 return 0;
296
297         vm = vm_bo->vm;
298         if (!vm)
299                 return 0;
300
301         info = vm->process_info;
302         if (!info || !info->eviction_fence)
303                 return 0;
304
305         ef = container_of(dma_fence_get(&info->eviction_fence->base),
306                         struct amdgpu_amdkfd_fence, base);
307
308         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
309         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
310         dma_resv_unlock(bo->tbo.base.resv);
311
312         dma_fence_put(&ef->base);
313         return ret;
314 }
315
316 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
317                                      bool wait)
318 {
319         struct ttm_operation_ctx ctx = { false, false };
320         int ret;
321
322         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
323                  "Called with userptr BO"))
324                 return -EINVAL;
325
326         amdgpu_bo_placement_from_domain(bo, domain);
327
328         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
329         if (ret)
330                 goto validate_fail;
331         if (wait)
332                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
333
334 validate_fail:
335         return ret;
336 }
337
338 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
339 {
340         struct amdgpu_vm_parser *p = param;
341
342         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
343 }
344
345 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
346  *
347  * Page directories are not updated here because huge page handling
348  * during page table updates can invalidate page directory entries
349  * again. Page directories are only updated after updating page
350  * tables.
351  */
352 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
353 {
354         struct amdgpu_bo *pd = vm->root.base.bo;
355         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
356         struct amdgpu_vm_parser param;
357         int ret;
358
359         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
360         param.wait = false;
361
362         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
363                                         &param);
364         if (ret) {
365                 pr_err("failed to validate PT BOs\n");
366                 return ret;
367         }
368
369         ret = amdgpu_amdkfd_validate(&param, pd);
370         if (ret) {
371                 pr_err("failed to validate PD\n");
372                 return ret;
373         }
374
375         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
376
377         if (vm->use_cpu_for_update) {
378                 ret = amdgpu_bo_kmap(pd, NULL);
379                 if (ret) {
380                         pr_err("failed to kmap PD, ret=%d\n", ret);
381                         return ret;
382                 }
383         }
384
385         return 0;
386 }
387
388 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
389 {
390         struct amdgpu_bo *pd = vm->root.base.bo;
391         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
392         int ret;
393
394         ret = amdgpu_vm_update_pdes(adev, vm, false);
395         if (ret)
396                 return ret;
397
398         return amdgpu_sync_fence(sync, vm->last_update, false);
399 }
400
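/* get_pte_flags - translate KFD allocation flags into GPUVM mapping flags
 *
 * Derives the readable/writable/executable bits from the
 * KFD_IOC_ALLOC_MEM_FLAGS_* bits and selects an MTYPE depending on the ASIC
 * and on whether a VRAM BO is local to the mapping GPU (Arcturus uses CC/RW
 * for local VRAM and UC for remote VRAM). The result is converted to PTE
 * flags with amdgpu_gem_va_map_flags().
 */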
401 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
402 {
403         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
404         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
405         uint32_t mapping_flags;
406
407         mapping_flags = AMDGPU_VM_PAGE_READABLE;
408         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
409                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
410         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
411                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
412
413         switch (adev->asic_type) {
414         case CHIP_ARCTURUS:
415                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
416                         if (bo_adev == adev)
417                                 mapping_flags |= coherent ?
418                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
419                         else
420                                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
421                 } else {
422                         mapping_flags |= coherent ?
423                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
424                 }
425                 break;
426         default:
427                 mapping_flags |= coherent ?
428                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
429         }
430
431         return amdgpu_gem_va_map_flags(adev, mapping_flags);
432 }
433
434 /* add_bo_to_vm - Add a BO to a VM
435  *
436  * Everything that needs to be done only once when a BO is first added
437  * to a VM. It can later be mapped and unmapped many times without
438  * repeating these steps.
439  *
440  * 1. Allocate and initialize BO VA entry data structure
441  * 2. Add BO to the VM
442  * 3. Determine ASIC-specific PTE flags
443  * 4. Alloc page tables and directories if needed
444  * 4a.  Validate new page tables and directories
445  */
446 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
447                 struct amdgpu_vm *vm, bool is_aql,
448                 struct kfd_bo_va_list **p_bo_va_entry)
449 {
450         int ret;
451         struct kfd_bo_va_list *bo_va_entry;
452         struct amdgpu_bo *bo = mem->bo;
453         uint64_t va = mem->va;
454         struct list_head *list_bo_va = &mem->bo_va_list;
455         unsigned long bo_size = bo->tbo.mem.size;
456
457         if (!va) {
458                 pr_err("Invalid VA when adding BO to VM\n");
459                 return -EINVAL;
460         }
461
462         if (is_aql)
463                 va += bo_size;
464
465         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
466         if (!bo_va_entry)
467                 return -ENOMEM;
468
469         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
470                         va + bo_size, vm);
471
472         /* Add BO to VM internal data structures */
473         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
474         if (!bo_va_entry->bo_va) {
475                 ret = -EINVAL;
476                 pr_err("Failed to add BO object to VM. ret == %d\n",
477                                 ret);
478                 goto err_vmadd;
479         }
480
481         bo_va_entry->va = va;
482         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
483         bo_va_entry->kgd_dev = (void *)adev;
484         list_add(&bo_va_entry->bo_list, list_bo_va);
485
486         if (p_bo_va_entry)
487                 *p_bo_va_entry = bo_va_entry;
488
489         /* Allocate and validate page tables if needed */
490         ret = vm_validate_pt_pd_bos(vm);
491         if (ret) {
492                 pr_err("validate_pt_pd_bos() failed\n");
493                 goto err_alloc_pts;
494         }
495
496         return 0;
497
498 err_alloc_pts:
499         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
500         list_del(&bo_va_entry->bo_list);
501 err_vmadd:
502         kfree(bo_va_entry);
503         return ret;
504 }
505
506 static void remove_bo_from_vm(struct amdgpu_device *adev,
507                 struct kfd_bo_va_list *entry, unsigned long size)
508 {
509         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
510                         entry->va,
511                         entry->va + size, entry);
512         amdgpu_vm_bo_rmv(adev, entry->bo_va);
513         list_del(&entry->bo_list);
514         kfree(entry);
515 }
516
517 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
518                                 struct amdkfd_process_info *process_info,
519                                 bool userptr)
520 {
521         struct ttm_validate_buffer *entry = &mem->validate_list;
522         struct amdgpu_bo *bo = mem->bo;
523
524         INIT_LIST_HEAD(&entry->head);
525         entry->num_shared = 1;
526         entry->bo = &bo->tbo;
527         mutex_lock(&process_info->lock);
528         if (userptr)
529                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
530         else
531                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
532         mutex_unlock(&process_info->lock);
533 }
534
535 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
536                 struct amdkfd_process_info *process_info)
537 {
538         struct ttm_validate_buffer *bo_list_entry;
539
540         bo_list_entry = &mem->validate_list;
541         mutex_lock(&process_info->lock);
542         list_del(&bo_list_entry->head);
543         mutex_unlock(&process_info->lock);
544 }
545
546 /* Initializes user pages. It registers the MMU notifier and validates
547  * the userptr BO in the GTT domain.
548  *
549  * The BO must already be on the userptr_valid_list. Otherwise an
550  * eviction and restore may happen that leaves the new BO unmapped
551  * with the user mode queues running.
552  *
553  * Takes the process_info->lock to protect against concurrent restore
554  * workers.
555  *
556  * Returns 0 for success, negative errno for errors.
557  */
558 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
559 {
560         struct amdkfd_process_info *process_info = mem->process_info;
561         struct amdgpu_bo *bo = mem->bo;
562         struct ttm_operation_ctx ctx = { true, false };
563         int ret = 0;
564
565         mutex_lock(&process_info->lock);
566
567         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
568         if (ret) {
569                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
570                 goto out;
571         }
572
573         ret = amdgpu_mn_register(bo, user_addr);
574         if (ret) {
575                 pr_err("%s: Failed to register MMU notifier: %d\n",
576                        __func__, ret);
577                 goto out;
578         }
579
580         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
581         if (ret) {
582                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
583                 goto unregister_out;
584         }
585
586         ret = amdgpu_bo_reserve(bo, true);
587         if (ret) {
588                 pr_err("%s: Failed to reserve BO\n", __func__);
589                 goto release_out;
590         }
591         amdgpu_bo_placement_from_domain(bo, mem->domain);
592         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
593         if (ret)
594                 pr_err("%s: failed to validate BO\n", __func__);
595         amdgpu_bo_unreserve(bo);
596
597 release_out:
598         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
599 unregister_out:
600         if (ret)
601                 amdgpu_mn_unregister(bo);
602 out:
603         mutex_unlock(&process_info->lock);
604         return ret;
605 }
606
607 /* Reserving a BO and its page table BOs must happen atomically to
608  * avoid deadlocks. Some operations update multiple VMs at once. Track
609  * all the reservation info in a context structure. Optionally a sync
610  * object can track VM updates.
611  */
612 struct bo_vm_reservation_context {
613         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
614         unsigned int n_vms;                 /* Number of VMs reserved       */
615         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
616         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
617         struct list_head list, duplicates;  /* BO lists                     */
618         struct amdgpu_sync *sync;           /* Pointer to sync object       */
619         bool reserved;                      /* Whether BOs are reserved     */
620 };
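/* Illustrative usage sketch (the real call sites are the map/unmap
 * functions further down in this file):
 *
 *        struct bo_vm_reservation_context ctx;
 *
 *        ret = reserve_bo_and_vm(mem, vm, &ctx);
 *        if (ret)
 *                return ret;
 *        ... map or unmap, update page tables; fences collect in ctx.sync ...
 *        ret = unreserve_bo_and_vms(&ctx, false, false);
 */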
621
622 enum bo_vm_match {
623         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
624         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
625         BO_VM_ALL,              /* Match all VMs a BO was added to    */
626 };
627
628 /**
629  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
630  * @mem: KFD BO structure.
631  * @vm: the VM to reserve.
632  * @ctx: the struct that will be used in unreserve_bo_and_vms().
633  */
634 static int reserve_bo_and_vm(struct kgd_mem *mem,
635                               struct amdgpu_vm *vm,
636                               struct bo_vm_reservation_context *ctx)
637 {
638         struct amdgpu_bo *bo = mem->bo;
639         int ret;
640
641         WARN_ON(!vm);
642
643         ctx->reserved = false;
644         ctx->n_vms = 1;
645         ctx->sync = &mem->sync;
646
647         INIT_LIST_HEAD(&ctx->list);
648         INIT_LIST_HEAD(&ctx->duplicates);
649
650         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
651         if (!ctx->vm_pd)
652                 return -ENOMEM;
653
654         ctx->kfd_bo.priority = 0;
655         ctx->kfd_bo.tv.bo = &bo->tbo;
656         ctx->kfd_bo.tv.num_shared = 1;
657         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
658
659         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
660
661         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
662                                      false, &ctx->duplicates);
663         if (ret) {
664                 pr_err("Failed to reserve buffers in ttm.\n");
665                 kfree(ctx->vm_pd);
666                 ctx->vm_pd = NULL;
667                 return ret;
668         }
669
670         ctx->reserved = true;
671         return 0;
672 }
673
674 /**
675  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
676  * @mem: KFD BO structure.
677  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
678  * are used. Otherwise, only the given VM is used.
679  * @map_type: the mapping status that will be used to filter the VMs.
680  * @ctx: the struct that will be used in unreserve_bo_and_vms().
681  *
682  * Returns 0 for success, negative for failure.
683  */
684 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
685                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
686                                 struct bo_vm_reservation_context *ctx)
687 {
688         struct amdgpu_bo *bo = mem->bo;
689         struct kfd_bo_va_list *entry;
690         unsigned int i;
691         int ret;
692
693         ctx->reserved = false;
694         ctx->n_vms = 0;
695         ctx->vm_pd = NULL;
696         ctx->sync = &mem->sync;
697
698         INIT_LIST_HEAD(&ctx->list);
699         INIT_LIST_HEAD(&ctx->duplicates);
700
701         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
702                 if ((vm && vm != entry->bo_va->base.vm) ||
703                         (entry->is_mapped != map_type
704                         && map_type != BO_VM_ALL))
705                         continue;
706
707                 ctx->n_vms++;
708         }
709
710         if (ctx->n_vms != 0) {
711                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
712                                      GFP_KERNEL);
713                 if (!ctx->vm_pd)
714                         return -ENOMEM;
715         }
716
717         ctx->kfd_bo.priority = 0;
718         ctx->kfd_bo.tv.bo = &bo->tbo;
719         ctx->kfd_bo.tv.num_shared = 1;
720         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
721
722         i = 0;
723         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
724                 if ((vm && vm != entry->bo_va->base.vm) ||
725                         (entry->is_mapped != map_type
726                         && map_type != BO_VM_ALL))
727                         continue;
728
729                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
730                                 &ctx->vm_pd[i]);
731                 i++;
732         }
733
734         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
735                                      false, &ctx->duplicates);
736         if (ret) {
737                 pr_err("Failed to reserve buffers in ttm.\n");
738                 kfree(ctx->vm_pd);
739                 ctx->vm_pd = NULL;
740                 return ret;
741         }
742
743         ctx->reserved = true;
744         return 0;
745 }
746
747 /**
748  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
749  * @ctx: Reservation context to unreserve
750  * @wait: Optionally wait for a sync object representing pending VM updates
751  * @intr: Whether the wait is interruptible
752  *
753  * Also frees any resources allocated in
754  * reserve_bo_and_(cond_)vm(s). Returns the status from
755  * amdgpu_sync_wait.
756  */
757 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
758                                  bool wait, bool intr)
759 {
760         int ret = 0;
761
762         if (wait)
763                 ret = amdgpu_sync_wait(ctx->sync, intr);
764
765         if (ctx->reserved)
766                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
767         kfree(ctx->vm_pd);
768
769         ctx->sync = NULL;
770
771         ctx->reserved = false;
772         ctx->vm_pd = NULL;
773
774         return ret;
775 }
776
777 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
778                                 struct kfd_bo_va_list *entry,
779                                 struct amdgpu_sync *sync)
780 {
781         struct amdgpu_bo_va *bo_va = entry->bo_va;
782         struct amdgpu_vm *vm = bo_va->base.vm;
783
784         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
785
786         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
787
788         amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
789
790         return 0;
791 }
792
793 static int update_gpuvm_pte(struct amdgpu_device *adev,
794                 struct kfd_bo_va_list *entry,
795                 struct amdgpu_sync *sync)
796 {
797         int ret;
798         struct amdgpu_bo_va *bo_va = entry->bo_va;
799
800         /* Update the page tables  */
801         ret = amdgpu_vm_bo_update(adev, bo_va, false);
802         if (ret) {
803                 pr_err("amdgpu_vm_bo_update failed\n");
804                 return ret;
805         }
806
807         return amdgpu_sync_fence(sync, bo_va->last_pt_update, false);
808 }
809
810 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
811                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
812                 bool no_update_pte)
813 {
814         int ret;
815
816         /* Set virtual address for the allocation */
817         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
818                                amdgpu_bo_size(entry->bo_va->base.bo),
819                                entry->pte_flags);
820         if (ret) {
821                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
822                                 entry->va, ret);
823                 return ret;
824         }
825
826         if (no_update_pte)
827                 return 0;
828
829         ret = update_gpuvm_pte(adev, entry, sync);
830         if (ret) {
831                 pr_err("update_gpuvm_pte() failed\n");
832                 goto update_gpuvm_pte_failed;
833         }
834
835         return 0;
836
837 update_gpuvm_pte_failed:
838         unmap_bo_from_gpuvm(adev, entry, sync);
839         return ret;
840 }
841
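/* create_doorbell_sg - build a single-entry sg_table for a doorbell/MMIO page
 *
 * The returned table has one entry whose dma_address is the given bus
 * address. It backs the ttm_bo_type_sg BOs created for
 * KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL and _MMIO_REMAP allocations in
 * amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(), so those map the page directly
 * instead of allocating system memory.
 */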
842 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
843 {
844         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
845
846         if (!sg)
847                 return NULL;
848         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
849                 kfree(sg);
850                 return NULL;
851         }
852         sg->sgl->dma_address = addr;
853         sg->sgl->length = size;
854 #ifdef CONFIG_NEED_SG_DMA_LENGTH
855         sg->sgl->dma_length = size;
856 #endif
857         return sg;
858 }
859
860 static int process_validate_vms(struct amdkfd_process_info *process_info)
861 {
862         struct amdgpu_vm *peer_vm;
863         int ret;
864
865         list_for_each_entry(peer_vm, &process_info->vm_list_head,
866                             vm_list_node) {
867                 ret = vm_validate_pt_pd_bos(peer_vm);
868                 if (ret)
869                         return ret;
870         }
871
872         return 0;
873 }
874
875 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
876                                  struct amdgpu_sync *sync)
877 {
878         struct amdgpu_vm *peer_vm;
879         int ret;
880
881         list_for_each_entry(peer_vm, &process_info->vm_list_head,
882                             vm_list_node) {
883                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
884
885                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
886                                        AMDGPU_SYNC_NE_OWNER,
887                                        AMDGPU_FENCE_OWNER_KFD);
888                 if (ret)
889                         return ret;
890         }
891
892         return 0;
893 }
894
895 static int process_update_pds(struct amdkfd_process_info *process_info,
896                               struct amdgpu_sync *sync)
897 {
898         struct amdgpu_vm *peer_vm;
899         int ret;
900
901         list_for_each_entry(peer_vm, &process_info->vm_list_head,
902                             vm_list_node) {
903                 ret = vm_update_pds(peer_vm, sync);
904                 if (ret)
905                         return ret;
906         }
907
908         return 0;
909 }
910
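/* init_kfd_vm - set up the KFD/compute state of an amdgpu VM
 *
 * On first use this creates the shared per-process info (BO lists, eviction
 * fence, userptr restore worker). It then validates the page directory,
 * attaches the process eviction fence to it and links the VM into the
 * process' VM list.
 */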
911 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
912                        struct dma_fence **ef)
913 {
914         struct amdkfd_process_info *info = NULL;
915         int ret;
916
917         if (!*process_info) {
918                 info = kzalloc(sizeof(*info), GFP_KERNEL);
919                 if (!info)
920                         return -ENOMEM;
921
922                 mutex_init(&info->lock);
923                 INIT_LIST_HEAD(&info->vm_list_head);
924                 INIT_LIST_HEAD(&info->kfd_bo_list);
925                 INIT_LIST_HEAD(&info->userptr_valid_list);
926                 INIT_LIST_HEAD(&info->userptr_inval_list);
927
928                 info->eviction_fence =
929                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
930                                                    current->mm);
931                 if (!info->eviction_fence) {
932                         pr_err("Failed to create eviction fence\n");
933                         ret = -ENOMEM;
934                         goto create_evict_fence_fail;
935                 }
936
937                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
938                 atomic_set(&info->evicted_bos, 0);
939                 INIT_DELAYED_WORK(&info->restore_userptr_work,
940                                   amdgpu_amdkfd_restore_userptr_worker);
941
942                 *process_info = info;
943                 *ef = dma_fence_get(&info->eviction_fence->base);
944         }
945
946         vm->process_info = *process_info;
947
948         /* Validate page directory and attach eviction fence */
949         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
950         if (ret)
951                 goto reserve_pd_fail;
952         ret = vm_validate_pt_pd_bos(vm);
953         if (ret) {
954                 pr_err("validate_pt_pd_bos() failed\n");
955                 goto validate_pd_fail;
956         }
957         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
958                                   AMDGPU_FENCE_OWNER_KFD, false);
959         if (ret)
960                 goto wait_pd_fail;
961         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
962         if (ret)
963                 goto reserve_shared_fail;
964         amdgpu_bo_fence(vm->root.base.bo,
965                         &vm->process_info->eviction_fence->base, true);
966         amdgpu_bo_unreserve(vm->root.base.bo);
967
968         /* Update process info */
969         mutex_lock(&vm->process_info->lock);
970         list_add_tail(&vm->vm_list_node,
971                         &(vm->process_info->vm_list_head));
972         vm->process_info->n_vms++;
973         mutex_unlock(&vm->process_info->lock);
974
975         return 0;
976
977 reserve_shared_fail:
978 wait_pd_fail:
979 validate_pd_fail:
980         amdgpu_bo_unreserve(vm->root.base.bo);
981 reserve_pd_fail:
982         vm->process_info = NULL;
983         if (info) {
984                 /* Two fence references: one in info and one in *ef */
985                 dma_fence_put(&info->eviction_fence->base);
986                 dma_fence_put(*ef);
987                 *ef = NULL;
988                 *process_info = NULL;
989                 put_pid(info->pid);
990 create_evict_fence_fail:
991                 mutex_destroy(&info->lock);
992                 kfree(info);
993         }
994         return ret;
995 }
996
997 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
998                                           void **vm, void **process_info,
999                                           struct dma_fence **ef)
1000 {
1001         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1002         struct amdgpu_vm *new_vm;
1003         int ret;
1004
1005         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1006         if (!new_vm)
1007                 return -ENOMEM;
1008
1009         /* Initialize AMDGPU part of the VM */
1010         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1011         if (ret) {
1012                 pr_err("Failed init vm ret %d\n", ret);
1013                 goto amdgpu_vm_init_fail;
1014         }
1015
1016         /* Initialize KFD part of the VM and process info */
1017         ret = init_kfd_vm(new_vm, process_info, ef);
1018         if (ret)
1019                 goto init_kfd_vm_fail;
1020
1021         *vm = (void *) new_vm;
1022
1023         return 0;
1024
1025 init_kfd_vm_fail:
1026         amdgpu_vm_fini(adev, new_vm);
1027 amdgpu_vm_init_fail:
1028         kfree(new_vm);
1029         return ret;
1030 }
1031
1032 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1033                                            struct file *filp, unsigned int pasid,
1034                                            void **vm, void **process_info,
1035                                            struct dma_fence **ef)
1036 {
1037         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1038         struct drm_file *drm_priv = filp->private_data;
1039         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1040         struct amdgpu_vm *avm = &drv_priv->vm;
1041         int ret;
1042
1043         /* Already a compute VM? */
1044         if (avm->process_info)
1045                 return -EINVAL;
1046
1047         /* Convert VM into a compute VM */
1048         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1049         if (ret)
1050                 return ret;
1051
1052         /* Initialize KFD part of the VM and process info */
1053         ret = init_kfd_vm(avm, process_info, ef);
1054         if (ret)
1055                 return ret;
1056
1057         *vm = (void *)avm;
1058
1059         return 0;
1060 }
1061
1062 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1063                                     struct amdgpu_vm *vm)
1064 {
1065         struct amdkfd_process_info *process_info = vm->process_info;
1066         struct amdgpu_bo *pd = vm->root.base.bo;
1067
1068         if (!process_info)
1069                 return;
1070
1071         /* Release eviction fence from PD */
1072         amdgpu_bo_reserve(pd, false);
1073         amdgpu_bo_fence(pd, NULL, false);
1074         amdgpu_bo_unreserve(pd);
1075
1076         /* Update process info */
1077         mutex_lock(&process_info->lock);
1078         process_info->n_vms--;
1079         list_del(&vm->vm_list_node);
1080         mutex_unlock(&process_info->lock);
1081
1082         vm->process_info = NULL;
1083
1084         /* Release per-process resources when last compute VM is destroyed */
1085         if (!process_info->n_vms) {
1086                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1087                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1088                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1089
1090                 dma_fence_put(&process_info->eviction_fence->base);
1091                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1092                 put_pid(process_info->pid);
1093                 mutex_destroy(&process_info->lock);
1094                 kfree(process_info);
1095         }
1096 }
1097
1098 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1099 {
1100         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1101         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1102
1103         if (WARN_ON(!kgd || !vm))
1104                 return;
1105
1106         pr_debug("Destroying process vm %p\n", vm);
1107
1108         /* Release the VM context */
1109         amdgpu_vm_fini(adev, avm);
1110         kfree(vm);
1111 }
1112
1113 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1114 {
1115         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1116         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1117
1118         if (WARN_ON(!kgd || !vm))
1119                 return;
1120
1121         pr_debug("Releasing process vm %p\n", vm);
1122
1123         /* The original pasid of the amdgpu vm has already been
1124          * released when the amdgpu vm was converted to a compute vm.
1125          * The current pasid is managed by kfd and will be
1126          * released on kfd process destroy. Set the amdgpu pasid
1127          * to 0 to avoid a duplicate release.
1128          */
1129         amdgpu_vm_release_compute(adev, avm);
1130 }
1131
1132 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1133 {
1134         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1135         struct amdgpu_bo *pd = avm->root.base.bo;
1136         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1137
1138         if (adev->asic_type < CHIP_VEGA10)
1139                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1140         return avm->pd_phys_addr;
1141 }
1142
1143 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1144                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1145                 void *vm, struct kgd_mem **mem,
1146                 uint64_t *offset, uint32_t flags)
1147 {
1148         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1149         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1150         enum ttm_bo_type bo_type = ttm_bo_type_device;
1151         struct sg_table *sg = NULL;
1152         uint64_t user_addr = 0;
1153         struct amdgpu_bo *bo;
1154         struct amdgpu_bo_param bp;
1155         u32 domain, alloc_domain;
1156         u64 alloc_flags;
1157         int ret;
1158
1159         /*
1160          * Check on which domain to allocate BO
1161          */
1162         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1163                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1164                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1165                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1166                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1167                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1168         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1169                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1170                 alloc_flags = 0;
1171         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1172                 domain = AMDGPU_GEM_DOMAIN_GTT;
1173                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1174                 alloc_flags = 0;
1175                 if (!offset || !*offset)
1176                         return -EINVAL;
1177                 user_addr = untagged_addr(*offset);
1178         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1179                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1180                 domain = AMDGPU_GEM_DOMAIN_GTT;
1181                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1182                 bo_type = ttm_bo_type_sg;
1183                 alloc_flags = 0;
1184                 if (size > UINT_MAX)
1185                         return -EINVAL;
1186                 sg = create_doorbell_sg(*offset, size);
1187                 if (!sg)
1188                         return -ENOMEM;
1189         } else {
1190                 return -EINVAL;
1191         }
1192
1193         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1194         if (!*mem) {
1195                 ret = -ENOMEM;
1196                 goto err;
1197         }
1198         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1199         mutex_init(&(*mem)->lock);
1200         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1201
1202         /* Workaround for AQL queue wraparound bug. Map the same
1203          * memory twice. That means we only actually allocate half
1204          * the memory.
1205          */
1206         if ((*mem)->aql_queue)
1207                 size = size >> 1;
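        /* For example, an 8 MB AQL queue allocation results in a 4 MB BO
         * that add_bo_to_vm() maps at both va and va + 4 MB, covering the
         * full 8 MB virtual range requested by the caller.
         */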
1208
1209         (*mem)->alloc_flags = flags;
1210
1211         amdgpu_sync_create(&(*mem)->sync);
1212
1213         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1214         if (ret) {
1215                 pr_debug("Insufficient system memory\n");
1216                 goto err_reserve_limit;
1217         }
1218
1219         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1220                         va, size, domain_string(alloc_domain));
1221
1222         memset(&bp, 0, sizeof(bp));
1223         bp.size = size;
1224         bp.byte_align = 1;
1225         bp.domain = alloc_domain;
1226         bp.flags = alloc_flags;
1227         bp.type = bo_type;
1228         bp.resv = NULL;
1229         ret = amdgpu_bo_create(adev, &bp, &bo);
1230         if (ret) {
1231                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1232                                 domain_string(alloc_domain), ret);
1233                 goto err_bo_create;
1234         }
1235         if (bo_type == ttm_bo_type_sg) {
1236                 bo->tbo.sg = sg;
1237                 bo->tbo.ttm->sg = sg;
1238         }
1239         bo->kfd_bo = *mem;
1240         (*mem)->bo = bo;
1241         if (user_addr)
1242                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1243
1244         (*mem)->va = va;
1245         (*mem)->domain = domain;
1246         (*mem)->mapped_to_gpu_memory = 0;
1247         (*mem)->process_info = avm->process_info;
1248         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1249
1250         if (user_addr) {
1251                 ret = init_user_pages(*mem, user_addr);
1252                 if (ret)
1253                         goto allocate_init_user_pages_failed;
1254         }
1255
1256         if (offset)
1257                 *offset = amdgpu_bo_mmap_offset(bo);
1258
1259         return 0;
1260
1261 allocate_init_user_pages_failed:
1262         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1263         amdgpu_bo_unref(&bo);
1264         /* Don't unreserve system mem limit twice */
1265         goto err_reserve_limit;
1266 err_bo_create:
1267         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1268 err_reserve_limit:
1269         mutex_destroy(&(*mem)->lock);
1270         kfree(*mem);
1271 err:
1272         if (sg) {
1273                 sg_free_table(sg);
1274                 kfree(sg);
1275         }
1276         return ret;
1277 }
1278
1279 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1280                 struct kgd_dev *kgd, struct kgd_mem *mem)
1281 {
1282         struct amdkfd_process_info *process_info = mem->process_info;
1283         unsigned long bo_size = mem->bo->tbo.mem.size;
1284         struct kfd_bo_va_list *entry, *tmp;
1285         struct bo_vm_reservation_context ctx;
1286         struct ttm_validate_buffer *bo_list_entry;
1287         unsigned int mapped_to_gpu_memory;
1288         int ret;
1289
1290         mutex_lock(&mem->lock);
1291         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1292         mutex_unlock(&mem->lock);
1293         /* lock is not needed after this, since mem is unused and will
1294          * be freed anyway
1295          */
1296
1297         if (mapped_to_gpu_memory > 0) {
1298                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1299                                 mem->va, bo_size);
1300                 return -EBUSY;
1301         }
1302
1303         /* No more MMU notifiers */
1304         amdgpu_mn_unregister(mem->bo);
1305
1306         /* Make sure restore workers don't access the BO any more */
1307         bo_list_entry = &mem->validate_list;
1308         mutex_lock(&process_info->lock);
1309         list_del(&bo_list_entry->head);
1310         mutex_unlock(&process_info->lock);
1311
1312         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1313         if (unlikely(ret))
1314                 return ret;
1315
1316         /* The eviction fence should be removed by the last unmap.
1317          * TODO: Log an error condition if the bo still has the eviction fence
1318          * attached
1319          */
1320         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1321                                         process_info->eviction_fence);
1322         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1323                 mem->va + bo_size * (1 + mem->aql_queue));
1324
1325         /* Remove from VM internal data structures */
1326         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1327                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1328                                 entry, bo_size);
1329
1330         ret = unreserve_bo_and_vms(&ctx, false, false);
1331
1332         /* Free the sync object */
1333         amdgpu_sync_free(&mem->sync);
1334
1335         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1336          * remap BO. We need to free it.
1337          */
1338         if (mem->bo->tbo.sg) {
1339                 sg_free_table(mem->bo->tbo.sg);
1340                 kfree(mem->bo->tbo.sg);
1341         }
1342
1343         /* Free the BO */
1344         amdgpu_bo_unref(&mem->bo);
1345         mutex_destroy(&mem->lock);
1346         kfree(mem);
1347
1348         return ret;
1349 }
1350
1351 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1352                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1353 {
1354         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1355         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1356         int ret;
1357         struct amdgpu_bo *bo;
1358         uint32_t domain;
1359         struct kfd_bo_va_list *entry;
1360         struct bo_vm_reservation_context ctx;
1361         struct kfd_bo_va_list *bo_va_entry = NULL;
1362         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1363         unsigned long bo_size;
1364         bool is_invalid_userptr = false;
1365
1366         bo = mem->bo;
1367         if (!bo) {
1368                 pr_err("Invalid BO when mapping memory to GPU\n");
1369                 return -EINVAL;
1370         }
1371
1372         /* Make sure restore is not running concurrently. Since we
1373          * don't map invalid userptr BOs, we rely on the next restore
1374          * worker to do the mapping
1375          */
1376         mutex_lock(&mem->process_info->lock);
1377
1378         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1379          * sure that the MMU notifier is no longer running
1380          * concurrently and the queues are actually stopped
1381          */
1382         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1383                 down_write(&current->mm->mmap_sem);
1384                 is_invalid_userptr = atomic_read(&mem->invalid);
1385                 up_write(&current->mm->mmap_sem);
1386         }
1387
1388         mutex_lock(&mem->lock);
1389
1390         domain = mem->domain;
1391         bo_size = bo->tbo.mem.size;
1392
1393         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1394                         mem->va,
1395                         mem->va + bo_size * (1 + mem->aql_queue),
1396                         vm, domain_string(domain));
1397
1398         ret = reserve_bo_and_vm(mem, vm, &ctx);
1399         if (unlikely(ret))
1400                 goto out;
1401
1402         /* Userptr can be marked as "not invalid", but not actually be
1403          * validated yet (still in the system domain). In that case
1404          * the queues are still stopped and we can leave mapping for
1405          * the next restore worker
1406          */
1407         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1408             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1409                 is_invalid_userptr = true;
1410
1411         if (check_if_add_bo_to_vm(avm, mem)) {
1412                 ret = add_bo_to_vm(adev, mem, avm, false,
1413                                 &bo_va_entry);
1414                 if (ret)
1415                         goto add_bo_to_vm_failed;
1416                 if (mem->aql_queue) {
1417                         ret = add_bo_to_vm(adev, mem, avm,
1418                                         true, &bo_va_entry_aql);
1419                         if (ret)
1420                                 goto add_bo_to_vm_failed_aql;
1421                 }
1422         } else {
1423                 ret = vm_validate_pt_pd_bos(avm);
1424                 if (unlikely(ret))
1425                         goto add_bo_to_vm_failed;
1426         }
1427
1428         if (mem->mapped_to_gpu_memory == 0 &&
1429             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1430                 /* Validate BO only once. The eviction fence gets added to BO
1431                  * the first time it is mapped. Validate will wait for all
1432                  * background evictions to complete.
1433                  */
1434                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1435                 if (ret) {
1436                         pr_debug("Validate failed\n");
1437                         goto map_bo_to_gpuvm_failed;
1438                 }
1439         }
1440
1441         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1442                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1443                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1444                                         entry->va, entry->va + bo_size,
1445                                         entry);
1446
1447                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1448                                               is_invalid_userptr);
1449                         if (ret) {
1450                                 pr_err("Failed to map bo to gpuvm\n");
1451                                 goto map_bo_to_gpuvm_failed;
1452                         }
1453
1454                         ret = vm_update_pds(vm, ctx.sync);
1455                         if (ret) {
1456                                 pr_err("Failed to update page directories\n");
1457                                 goto map_bo_to_gpuvm_failed;
1458                         }
1459
1460                         entry->is_mapped = true;
1461                         mem->mapped_to_gpu_memory++;
1462                         pr_debug("\t INC mapping count %d\n",
1463                                         mem->mapped_to_gpu_memory);
1464                 }
1465         }
1466
1467         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1468                 amdgpu_bo_fence(bo,
1469                                 &avm->process_info->eviction_fence->base,
1470                                 true);
1471         ret = unreserve_bo_and_vms(&ctx, false, false);
1472
1473         goto out;
1474
1475 map_bo_to_gpuvm_failed:
1476         if (bo_va_entry_aql)
1477                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1478 add_bo_to_vm_failed_aql:
1479         if (bo_va_entry)
1480                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1481 add_bo_to_vm_failed:
1482         unreserve_bo_and_vms(&ctx, false, false);
1483 out:
1484         mutex_unlock(&mem->process_info->lock);
1485         mutex_unlock(&mem->lock);
1486         return ret;
1487 }
1488
1489 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1490                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1491 {
1492         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1493         struct amdkfd_process_info *process_info =
1494                 ((struct amdgpu_vm *)vm)->process_info;
1495         unsigned long bo_size = mem->bo->tbo.mem.size;
1496         struct kfd_bo_va_list *entry;
1497         struct bo_vm_reservation_context ctx;
1498         int ret;
1499
1500         mutex_lock(&mem->lock);
1501
1502         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1503         if (unlikely(ret))
1504                 goto out;
1505         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1506         if (ctx.n_vms == 0) {
1507                 ret = -EINVAL;
1508                 goto unreserve_out;
1509         }
1510
1511         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1512         if (unlikely(ret))
1513                 goto unreserve_out;
1514
1515         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1516                 mem->va,
1517                 mem->va + bo_size * (1 + mem->aql_queue),
1518                 vm);
1519
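             /* Unmap the BO from every attachment in this VM that is
              * currently mapped and decrement the mapping count for each one.
              */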
1520         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1521                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1522                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1523                                         entry->va,
1524                                         entry->va + bo_size,
1525                                         entry);
1526
1527                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1528                         if (ret == 0) {
1529                                 entry->is_mapped = false;
1530                         } else {
1531                                 pr_err("failed to unmap VA 0x%llx\n",
1532                                                 mem->va);
1533                                 goto unreserve_out;
1534                         }
1535
1536                         mem->mapped_to_gpu_memory--;
1537                         pr_debug("\t DEC mapping count %d\n",
1538                                         mem->mapped_to_gpu_memory);
1539                 }
1540         }
1541
1542         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1543          * required.
1544          */
1545         if (mem->mapped_to_gpu_memory == 0 &&
1546             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1547                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1548                                                 process_info->eviction_fence);
1549
1550 unreserve_out:
1551         unreserve_bo_and_vms(&ctx, false, false);
1552 out:
1553         mutex_unlock(&mem->lock);
1554         return ret;
1555 }
1556
1557 int amdgpu_amdkfd_gpuvm_sync_memory(
1558                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1559 {
1560         struct amdgpu_sync sync;
1561         int ret;
1562
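             /* Clone the BO's sync object under the lock and wait on the
              * copy, so the wait itself does not block other users of
              * mem->sync.
              */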
1563         amdgpu_sync_create(&sync);
1564
1565         mutex_lock(&mem->lock);
1566         amdgpu_sync_clone(&mem->sync, &sync);
1567         mutex_unlock(&mem->lock);
1568
1569         ret = amdgpu_sync_wait(&sync, intr);
1570         amdgpu_sync_free(&sync);
1571         return ret;
1572 }
1573
1574 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1575                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1576 {
1577         int ret;
1578         struct amdgpu_bo *bo = mem->bo;
1579
1580         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1581                 pr_err("userptr can't be mapped to kernel\n");
1582                 return -EINVAL;
1583         }
1584
1585         /* Remove kgd_mem from the kfd_bo_list so this BO is not
1586          * re-validated during BO restore after an eviction.
1587          */
1588         mutex_lock(&mem->process_info->lock);
1589
1590         ret = amdgpu_bo_reserve(bo, true);
1591         if (ret) {
1592                 pr_err("Failed to reserve bo. ret %d\n", ret);
1593                 goto bo_reserve_failed;
1594         }
1595
1596         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1597         if (ret) {
1598                 pr_err("Failed to pin bo. ret %d\n", ret);
1599                 goto pin_failed;
1600         }
1601
1602         ret = amdgpu_bo_kmap(bo, kptr);
1603         if (ret) {
1604                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1605                 goto kmap_failed;
1606         }
1607
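             /* The BO is pinned now, so it can no longer be evicted. Drop
              * the KFD eviction fence and take the BO off the validate list
              * (see the comment above).
              */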
1608         amdgpu_amdkfd_remove_eviction_fence(
1609                 bo, mem->process_info->eviction_fence);
1610         list_del_init(&mem->validate_list.head);
1611
1612         if (size)
1613                 *size = amdgpu_bo_size(bo);
1614
1615         amdgpu_bo_unreserve(bo);
1616
1617         mutex_unlock(&mem->process_info->lock);
1618         return 0;
1619
1620 kmap_failed:
1621         amdgpu_bo_unpin(bo);
1622 pin_failed:
1623         amdgpu_bo_unreserve(bo);
1624 bo_reserve_failed:
1625         mutex_unlock(&mem->process_info->lock);
1626
1627         return ret;
1628 }
1629
1630 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1631                                               struct kfd_vm_fault_info *mem)
1632 {
1633         struct amdgpu_device *adev;
1634
1635         adev = (struct amdgpu_device *)kgd;
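             /* Copy the fault info only if it has been updated since the
              * last read. The barrier makes sure the copy completes before
              * the flag is cleared.
              */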
1636         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1637                 *mem = *adev->gmc.vm_fault_info;
1638                 mb();
1639                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1640         }
1641         return 0;
1642 }
1643
1644 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1645                                       struct dma_buf *dma_buf,
1646                                       uint64_t va, void *vm,
1647                                       struct kgd_mem **mem, uint64_t *size,
1648                                       uint64_t *mmap_offset)
1649 {
1650         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1651         struct drm_gem_object *obj;
1652         struct amdgpu_bo *bo;
1653         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1654
1655         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1656                 /* Can't handle non-graphics buffers */
1657                 return -EINVAL;
1658
1659         obj = dma_buf->priv;
1660         if (obj->dev->dev_private != adev)
1661                 /* Can't handle buffers from other devices */
1662                 return -EINVAL;
1663
1664         bo = gem_to_amdgpu_bo(obj);
1665         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1666                                     AMDGPU_GEM_DOMAIN_GTT)))
1667                 /* Only VRAM and GTT BOs are supported */
1668                 return -EINVAL;
1669
1670         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1671         if (!*mem)
1672                 return -ENOMEM;
1673
1674         if (size)
1675                 *size = amdgpu_bo_size(bo);
1676
1677         if (mmap_offset)
1678                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1679
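             /* Initialize the kgd_mem structure so the imported BO can be
              * tracked and mapped like a native KFD allocation.
              */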
1680         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1681         mutex_init(&(*mem)->lock);
1682
1683         (*mem)->alloc_flags =
1684                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1685                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1686                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1687                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1688
1689         (*mem)->bo = amdgpu_bo_ref(bo);
1690         (*mem)->va = va;
1691         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1692                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1693         (*mem)->mapped_to_gpu_memory = 0;
1694         (*mem)->process_info = avm->process_info;
1695         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1696         amdgpu_sync_create(&(*mem)->sync);
1697
1698         return 0;
1699 }
1700
1701 /* Evict a userptr BO by stopping the queues if necessary
1702  *
1703  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1704  * cannot do any memory allocations, and cannot take any locks that
1705  * are held elsewhere while allocating memory. Therefore this is as
1706  * simple as possible, using atomic counters.
1707  *
1708  * It doesn't do anything to the BO itself. The real work happens in
1709  * restore, where we get updated page addresses. This function only
1710  * ensures that GPU access to the BO is stopped.
1711  */
1712 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1713                                 struct mm_struct *mm)
1714 {
1715         struct amdkfd_process_info *process_info = mem->process_info;
1716         int evicted_bos;
1717         int r = 0;
1718
1719         atomic_inc(&mem->invalid);
1720         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1721         if (evicted_bos == 1) {
1722                 /* First eviction, stop the queues */
1723                 r = kgd2kfd_quiesce_mm(mm);
1724                 if (r)
1725                         pr_err("Failed to quiesce KFD\n");
1726                 schedule_delayed_work(&process_info->restore_userptr_work,
1727                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1728         }
1729
1730         return r;
1731 }
1732
1733 /* Update invalid userptr BOs
1734  *
1735  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1736  * userptr_inval_list and updates user pages for all BOs that have
1737  * been invalidated since their last update.
1738  */
1739 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1740                                      struct mm_struct *mm)
1741 {
1742         struct kgd_mem *mem, *tmp_mem;
1743         struct amdgpu_bo *bo;
1744         struct ttm_operation_ctx ctx = { false, false };
1745         int invalid, ret;
1746
1747         /* Move all invalidated BOs to the userptr_inval_list and
1748          * release their user pages by migrating them to the CPU domain
1749          */
1750         list_for_each_entry_safe(mem, tmp_mem,
1751                                  &process_info->userptr_valid_list,
1752                                  validate_list.head) {
1753                 if (!atomic_read(&mem->invalid))
1754                         continue; /* BO is still valid */
1755
1756                 bo = mem->bo;
1757
1758                 if (amdgpu_bo_reserve(bo, true))
1759                         return -EAGAIN;
1760                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1761                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1762                 amdgpu_bo_unreserve(bo);
1763                 if (ret) {
1764                         pr_err("%s: Failed to invalidate userptr BO\n",
1765                                __func__);
1766                         return -EAGAIN;
1767                 }
1768
1769                 list_move_tail(&mem->validate_list.head,
1770                                &process_info->userptr_inval_list);
1771         }
1772
1773         if (list_empty(&process_info->userptr_inval_list))
1774                 return 0; /* All evicted userptr BOs were freed */
1775
1776         /* Go through userptr_inval_list and update any invalid user_pages */
1777         list_for_each_entry(mem, &process_info->userptr_inval_list,
1778                             validate_list.head) {
1779                 invalid = atomic_read(&mem->invalid);
1780                 if (!invalid)
1781                         /* BO hasn't been invalidated since the last
1782                          * revalidation attempt. Keep its BO list.
1783                          */
1784                         continue;
1785
1786                 bo = mem->bo;
1787
1788                 /* Get updated user pages */
1789                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1790                 if (ret) {
1791                         pr_debug("%s: Failed to get user pages: %d\n",
1792                                 __func__, ret);
1793
1794                         /* Return error -EBUSY or -ENOMEM, retry restore */
1795                         return ret;
1796                 }
1797
1798                 /*
1799                  * FIXME: Cannot ignore the return code, must hold
1800                  * notifier_lock
1801                  */
1802                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1803
1804                 /* Mark the BO as valid unless it was invalidated
1805                  * again concurrently.
1806                  */
1807                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1808                         return -EAGAIN;
1809         }
1810
1811         return 0;
1812 }
1813
1814 /* Validate invalid userptr BOs
1815  *
1816  * Validates BOs on the userptr_inval_list, and moves them back to the
1817  * userptr_valid_list. Also updates GPUVM page tables with new page
1818  * addresses and waits for the page table updates to complete.
1819  */
1820 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1821 {
1822         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1823         struct list_head resv_list, duplicates;
1824         struct ww_acquire_ctx ticket;
1825         struct amdgpu_sync sync;
1826
1827         struct amdgpu_vm *peer_vm;
1828         struct kgd_mem *mem, *tmp_mem;
1829         struct amdgpu_bo *bo;
1830         struct ttm_operation_ctx ctx = { false, false };
1831         int i, ret;
1832
1833         pd_bo_list_entries = kcalloc(process_info->n_vms,
1834                                      sizeof(struct amdgpu_bo_list_entry),
1835                                      GFP_KERNEL);
1836         if (!pd_bo_list_entries) {
1837                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1838                 ret = -ENOMEM;
1839                 goto out_no_mem;
1840         }
1841
1842         INIT_LIST_HEAD(&resv_list);
1843         INIT_LIST_HEAD(&duplicates);
1844
1845         /* Get all the page directory BOs that need to be reserved */
1846         i = 0;
1847         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1848                             vm_list_node)
1849                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1850                                     &pd_bo_list_entries[i++]);
1851         /* Add the userptr_inval_list entries to resv_list */
1852         list_for_each_entry(mem, &process_info->userptr_inval_list,
1853                             validate_list.head) {
1854                 list_add_tail(&mem->resv_list.head, &resv_list);
1855                 mem->resv_list.bo = mem->validate_list.bo;
1856                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1857         }
1858
1859         /* Reserve all BOs and page tables for validation */
1860         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1861         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1862         if (ret)
1863                 goto out_free;
1864
1865         amdgpu_sync_create(&sync);
1866
1867         ret = process_validate_vms(process_info);
1868         if (ret)
1869                 goto unreserve_out;
1870
1871         /* Validate BOs and update GPUVM page tables */
1872         list_for_each_entry_safe(mem, tmp_mem,
1873                                  &process_info->userptr_inval_list,
1874                                  validate_list.head) {
1875                 struct kfd_bo_va_list *bo_va_entry;
1876
1877                 bo = mem->bo;
1878
1879                 /* Validate the BO if we got user pages */
1880                 if (bo->tbo.ttm->pages[0]) {
1881                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1882                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1883                         if (ret) {
1884                                 pr_err("%s: failed to validate BO\n", __func__);
1885                                 goto unreserve_out;
1886                         }
1887                 }
1888
1889                 list_move_tail(&mem->validate_list.head,
1890                                &process_info->userptr_valid_list);
1891
1892                 /* Update mapping. If the BO was not validated
1893                  * (because we couldn't get user pages), this will
1894                  * clear the page table entries, which will result in
1895                  * VM faults if the GPU tries to access the invalid
1896                  * memory.
1897                  */
1898                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1899                         if (!bo_va_entry->is_mapped)
1900                                 continue;
1901
1902                         ret = update_gpuvm_pte((struct amdgpu_device *)
1903                                                bo_va_entry->kgd_dev,
1904                                                bo_va_entry, &sync);
1905                         if (ret) {
1906                                 pr_err("%s: update PTE failed\n", __func__);
1907                                 /* make sure this gets validated again */
1908                                 atomic_inc(&mem->invalid);
1909                                 goto unreserve_out;
1910                         }
1911                 }
1912         }
1913
1914         /* Update page directories */
1915         ret = process_update_pds(process_info, &sync);
1916
1917 unreserve_out:
1918         ttm_eu_backoff_reservation(&ticket, &resv_list);
1919         amdgpu_sync_wait(&sync, false);
1920         amdgpu_sync_free(&sync);
1921 out_free:
1922         kfree(pd_bo_list_entries);
1923 out_no_mem:
1924
1925         return ret;
1926 }
1927
1928 /* Worker callback to restore evicted userptr BOs
1929  *
1930  * Tries to update and validate all userptr BOs. If successful and no
1931  * concurrent evictions happened, the queues are restarted. Otherwise,
1932  * reschedule for another attempt later.
1933  */
1934 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1935 {
1936         struct delayed_work *dwork = to_delayed_work(work);
1937         struct amdkfd_process_info *process_info =
1938                 container_of(dwork, struct amdkfd_process_info,
1939                              restore_userptr_work);
1940         struct task_struct *usertask;
1941         struct mm_struct *mm;
1942         int evicted_bos;
1943
1944         evicted_bos = atomic_read(&process_info->evicted_bos);
1945         if (!evicted_bos)
1946                 return;
1947
1948         /* Reference task and mm in case of concurrent process termination */
1949         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1950         if (!usertask)
1951                 return;
1952         mm = get_task_mm(usertask);
1953         if (!mm) {
1954                 put_task_struct(usertask);
1955                 return;
1956         }
1957
1958         mutex_lock(&process_info->lock);
1959
1960         if (update_invalid_user_pages(process_info, mm))
1961                 goto unlock_out;
1962         /* userptr_inval_list can be empty if all evicted userptr BOs
1963          * have been freed. In that case there is nothing to validate
1964          * and we can just restart the queues.
1965          */
1966         if (!list_empty(&process_info->userptr_inval_list)) {
1967                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1968                         goto unlock_out; /* Concurrent eviction, try again */
1969
1970                 if (validate_invalid_user_pages(process_info))
1971                         goto unlock_out;
1972         }
1973         /* Final check for a concurrent eviction and atomic update. If
1974          * another eviction happens after a successful update, it will be
1975          * treated as a first eviction that calls quiesce_mm. The eviction
1976          * reference counting inside KFD will handle this case.
1977          */
1978         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1979             evicted_bos)
1980                 goto unlock_out;
1981         evicted_bos = 0;
1982         if (kgd2kfd_resume_mm(mm)) {
1983                 pr_err("%s: Failed to resume KFD\n", __func__);
1984                 /* No recovery from this failure. Probably the CP is
1985                  * hanging. No point trying again.
1986                  */
1987         }
1988
1989 unlock_out:
1990         mutex_unlock(&process_info->lock);
1991         mmput(mm);
1992         put_task_struct(usertask);
1993
1994         /* If validation failed, reschedule another attempt */
1995         if (evicted_bos)
1996                 schedule_delayed_work(&process_info->restore_userptr_work,
1997                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1998 }
1999
2000 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2001  *   KFD process identified by process_info
2002  *
2003  * @process_info: amdkfd_process_info of the KFD process
2004  *
2005  * After memory eviction, the restore thread calls this function. It
2006  * should be called while the process is still valid. BO restore involves:
2007  *
2008  * 1.  Release the old eviction fence and create a new one
2009  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2010  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2011  *     BOs that need to be reserved.
2012  * 4.  Reserve all the BOs
2013  * 5.  Validate the PD and PT BOs.
2014  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2015  * 7.  Add the fence to all PD and PT BOs.
2016  * 8.  Unreserve all BOs
2017  */
2018 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2019 {
2020         struct amdgpu_bo_list_entry *pd_bo_list;
2021         struct amdkfd_process_info *process_info = info;
2022         struct amdgpu_vm *peer_vm;
2023         struct kgd_mem *mem;
2024         struct bo_vm_reservation_context ctx;
2025         struct amdgpu_amdkfd_fence *new_fence;
2026         int ret = 0, i;
2027         struct list_head duplicate_save;
2028         struct amdgpu_sync sync_obj;
2029
2030         INIT_LIST_HEAD(&duplicate_save);
2031         INIT_LIST_HEAD(&ctx.list);
2032         INIT_LIST_HEAD(&ctx.duplicates);
2033
2034         pd_bo_list = kcalloc(process_info->n_vms,
2035                              sizeof(struct amdgpu_bo_list_entry),
2036                              GFP_KERNEL);
2037         if (!pd_bo_list)
2038                 return -ENOMEM;
2039
2040         i = 0;
2041         mutex_lock(&process_info->lock);
2042         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2043                         vm_list_node)
2044                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2045
2046         /* Reserve all BOs and page tables/directory. Add all BOs from
2047          * kfd_bo_list to ctx.list
2048          */
2049         list_for_each_entry(mem, &process_info->kfd_bo_list,
2050                             validate_list.head) {
2051
2052                 list_add_tail(&mem->resv_list.head, &ctx.list);
2053                 mem->resv_list.bo = mem->validate_list.bo;
2054                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2055         }
2056
2057         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2058                                      false, &duplicate_save);
2059         if (ret) {
2060                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2061                 goto ttm_reserve_fail;
2062         }
2063
2064         amdgpu_sync_create(&sync_obj);
2065
2066         /* Validate PDs and PTs */
2067         ret = process_validate_vms(process_info);
2068         if (ret)
2069                 goto validate_map_fail;
2070
2071         ret = process_sync_pds_resv(process_info, &sync_obj);
2072         if (ret) {
2073                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2074                 goto validate_map_fail;
2075         }
2076
2077         /* Validate BOs and map them to GPUVM (update VM page tables). */
2078         list_for_each_entry(mem, &process_info->kfd_bo_list,
2079                             validate_list.head) {
2080
2081                 struct amdgpu_bo *bo = mem->bo;
2082                 uint32_t domain = mem->domain;
2083                 struct kfd_bo_va_list *bo_va_entry;
2084
2085                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2086                 if (ret) {
2087                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2088                         goto validate_map_fail;
2089                 }
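                     /* Add the BO's move fence to the sync object so the
                      * restore waits for any pending migration to finish.
                      */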
2090                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving, false);
2091                 if (ret) {
2092                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2093                         goto validate_map_fail;
2094                 }
2095                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2096                                     bo_list) {
2097                         ret = update_gpuvm_pte((struct amdgpu_device *)
2098                                               bo_va_entry->kgd_dev,
2099                                               bo_va_entry,
2100                                               &sync_obj);
2101                         if (ret) {
2102                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2103                                 goto validate_map_fail;
2104                         }
2105                 }
2106         }
2107
2108         /* Update page directories */
2109         ret = process_update_pds(process_info, &sync_obj);
2110         if (ret) {
2111                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2112                 goto validate_map_fail;
2113         }
2114
2115         /* Wait for validate and PT updates to finish */
2116         amdgpu_sync_wait(&sync_obj, false);
2117
2118         /* Release the old eviction fence and create a new one, because a
2119          * fence only goes from unsignaled to signaled and cannot be reused.
2120          * Use context and mm from the old fence.
2121          */
2122         new_fence = amdgpu_amdkfd_fence_create(
2123                                 process_info->eviction_fence->base.context,
2124                                 process_info->eviction_fence->mm);
2125         if (!new_fence) {
2126                 pr_err("Failed to create eviction fence\n");
2127                 ret = -ENOMEM;
2128                 goto validate_map_fail;
2129         }
2130         dma_fence_put(&process_info->eviction_fence->base);
2131         process_info->eviction_fence = new_fence;
2132         *ef = dma_fence_get(&new_fence->base);
2133
2134         /* Attach new eviction fence to all BOs */
2135         list_for_each_entry(mem, &process_info->kfd_bo_list,
2136                 validate_list.head)
2137                 amdgpu_bo_fence(mem->bo,
2138                         &process_info->eviction_fence->base, true);
2139
2140         /* Attach eviction fence to PD / PT BOs */
2141         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2142                             vm_list_node) {
2143                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2144
2145                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2146         }
2147
2148 validate_map_fail:
2149         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2150         amdgpu_sync_free(&sync_obj);
2151 ttm_reserve_fail:
2152         mutex_unlock(&process_info->lock);
2153         kfree(pd_bo_list);
2154         return ret;
2155 }
2156
2157 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2158 {
2159         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2160         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2161         int ret;
2162
2163         if (!info || !gws)
2164                 return -EINVAL;
2165
2166         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2167         if (!*mem)
2168                 return -ENOMEM;
2169
2170         mutex_init(&(*mem)->lock);
2171         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2172         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2173         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2174         (*mem)->process_info = process_info;
2175         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2176         amdgpu_sync_create(&(*mem)->sync);
2177
2178
2179         /* Validate gws bo the first time it is added to process */
2180         mutex_lock(&(*mem)->process_info->lock);
2181         ret = amdgpu_bo_reserve(gws_bo, false);
2182         if (unlikely(ret)) {
2183                 pr_err("Reserve gws bo failed %d\n", ret);
2184                 goto bo_reservation_failure;
2185         }
2186
2187         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2188         if (ret) {
2189                 pr_err("GWS BO validate failed %d\n", ret);
2190                 goto bo_validation_failure;
2191         }
2192         /* The GWS resource is shared between amdgpu and amdkfd.
2193          * Add the process eviction fence to the BO so they can
2194          * evict each other.
2195          */
2196         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2197         if (ret)
2198                 goto reserve_shared_fail;
2199         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2200         amdgpu_bo_unreserve(gws_bo);
2201         mutex_unlock(&(*mem)->process_info->lock);
2202
2203         return ret;
2204
2205 reserve_shared_fail:
2206 bo_validation_failure:
2207         amdgpu_bo_unreserve(gws_bo);
2208 bo_reservation_failure:
2209         mutex_unlock(&(*mem)->process_info->lock);
2210         amdgpu_sync_free(&(*mem)->sync);
2211         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2212         amdgpu_bo_unref(&gws_bo);
2213         mutex_destroy(&(*mem)->lock);
2214         kfree(*mem);
2215         *mem = NULL;
2216         return ret;
2217 }
2218
2219 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2220 {
2221         int ret;
2222         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2223         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2224         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2225
2226         /* Remove BO from process's validate list so restore worker won't touch
2227          * it anymore
2228          */
2229         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2230
2231         ret = amdgpu_bo_reserve(gws_bo, false);
2232         if (unlikely(ret)) {
2233                 pr_err("Reserve gws bo failed %d\n", ret);
2234                 //TODO add BO back to validate_list?
2235                 return ret;
2236         }
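             /* Drop the process eviction fence that was attached when the
              * GWS BO was added to the process.
              */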
2237         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2238                         process_info->eviction_fence);
2239         amdgpu_bo_unreserve(gws_bo);
2240         amdgpu_sync_free(&kgd_mem->sync);
2241         amdgpu_bo_unref(&gws_bo);
2242         mutex_destroy(&kgd_mem->lock);
2243         kfree(mem);
2244         return 0;
2245 }
2246
2247 /* Returns GPU-specific tiling mode information */
2248 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2249                                 struct tile_config *config)
2250 {
2251         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2252
2253         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2254         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2255         config->num_tile_configs =
2256                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2257         config->macro_tile_config_ptr =
2258                         adev->gfx.config.macrotile_mode_array;
2259         config->num_macro_tile_configs =
2260                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2261
2262         /* These values are not set from GFX9 onwards */
2263         config->num_banks = adev->gfx.config.num_banks;
2264         config->num_ranks = adev->gfx.config.num_ranks;
2265
2266         return 0;
2267 }