[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define pr_fmt(fmt) "kfd2kgd: " fmt
24
25 #include <linux/list.h>
26 #include <linux/pagemap.h>
27 #include <linux/sched/mm.h>
28 #include <drm/drmP.h>
29 #include "amdgpu_object.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32
33 /* Special VM and GART address alignment needed for VI pre-Fiji due to
34  * a HW bug.
35  */
36 #define VI_BO_SIZE_ALIGN (0x8000)
37
38 /* BO flag to indicate a KFD userptr BO */
39 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
40
41 /* Userptr restore delay, just long enough to allow consecutive VM
42  * changes to accumulate
43  */
44 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
45
46 /* Impose limit on how much memory KFD can use */
47 static struct {
48         uint64_t max_system_mem_limit;
49         uint64_t max_userptr_mem_limit;
50         int64_t system_mem_used;
51         int64_t userptr_mem_used;
52         spinlock_t mem_limit_lock;
53 } kfd_mem_limit;
54
55 /* Struct used for amdgpu_amdkfd_bo_validate */
56 struct amdgpu_vm_parser {
57         uint32_t        domain;
58         bool            wait;
59 };
60
61 static const char * const domain_bit_to_string[] = {
62                 "CPU",
63                 "GTT",
64                 "VRAM",
65                 "GDS",
66                 "GWS",
67                 "OA"
68 };
69
70 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
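/* A minimal worked example of the lookup above, assuming the standard
 * single-bit AMDGPU_GEM_DOMAIN_* masks from amdgpu_drm.h (CPU=0x1,
 * GTT=0x2, VRAM=0x4, ...): ffs() returns the 1-based position of the
 * lowest set bit, so a single-bit domain mask indexes the table directly:
 *
 *   domain_string(AMDGPU_GEM_DOMAIN_GTT)  -> ffs(0x2)-1 = 1 -> "GTT"
 *   domain_string(AMDGPU_GEM_DOMAIN_VRAM) -> ffs(0x4)-1 = 2 -> "VRAM"
 *
 * The macro is only meaningful for single-bit masks; a combined mask
 * resolves to its lowest set bit.
 */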
71
72 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
73
74
75 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
76 {
77         return (struct amdgpu_device *)kgd;
78 }
79
80 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
81                 struct kgd_mem *mem)
82 {
83         struct kfd_bo_va_list *entry;
84
85         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
86                 if (entry->bo_va->base.vm == avm)
87                         return false;
88
89         return true;
90 }
91
92 /* Set memory usage limits. Currently, the limits are
93  *  System (kernel) memory - 3/8th System RAM
94  *  Userptr memory - 3/4th System RAM
95  */
96 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
97 {
98         struct sysinfo si;
99         uint64_t mem;
100
101         si_meminfo(&si);
102         mem = si.totalram - si.totalhigh;
103         mem *= si.mem_unit;
104
105         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
106         kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
107         kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
108         pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
109                 (kfd_mem_limit.max_system_mem_limit >> 20),
110                 (kfd_mem_limit.max_userptr_mem_limit >> 20));
111 }
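/* Worked example of the limit arithmetic above, assuming si_meminfo()
 * reports 16 GiB of usable low memory:
 *
 *   max_system_mem_limit  = (mem >> 1) - (mem >> 3) =  8 GiB - 2 GiB =  6 GiB (3/8)
 *   max_userptr_mem_limit =  mem       - (mem >> 2) = 16 GiB - 4 GiB = 12 GiB (3/4)
 *
 * matching the 3/8 and 3/4 fractions documented above.
 */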
112
113 static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
114                                               uint64_t size, u32 domain)
115 {
116         size_t acc_size;
117         int ret = 0;
118
119         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
120                                        sizeof(struct amdgpu_bo));
121
122         spin_lock(&kfd_mem_limit.mem_limit_lock);
123         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
124                 if (kfd_mem_limit.system_mem_used + (acc_size + size) >
125                         kfd_mem_limit.max_system_mem_limit) {
126                         ret = -ENOMEM;
127                         goto err_no_mem;
128                 }
129                 kfd_mem_limit.system_mem_used += (acc_size + size);
130         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
131                 if ((kfd_mem_limit.system_mem_used + acc_size >
132                         kfd_mem_limit.max_system_mem_limit) ||
133                         (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
134                         kfd_mem_limit.max_userptr_mem_limit)) {
135                         ret = -ENOMEM;
136                         goto err_no_mem;
137                 }
138                 kfd_mem_limit.system_mem_used += acc_size;
139                 kfd_mem_limit.userptr_mem_used += size;
140         }
141 err_no_mem:
142         spin_unlock(&kfd_mem_limit.mem_limit_lock);
143         return ret;
144 }
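/* Illustrative sketch of the accounting policy above, using a
 * hypothetical allocation of size S with a TTM accounting overhead of A
 * (acc_size):
 *
 *   GTT BO:              system_mem_used  += A + S
 *   userptr BO (allocated in the CPU domain):
 *                        system_mem_used  += A
 *                        userptr_mem_used += S
 *
 * unreserve_system_mem_limit() below reverses exactly these charges, and
 * the WARN_ONCEs there catch any accounting imbalance.
 */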
145
146 static void unreserve_system_mem_limit(struct amdgpu_device *adev,
147                                        uint64_t size, u32 domain)
148 {
149         size_t acc_size;
150
151         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
152                                        sizeof(struct amdgpu_bo));
153
154         spin_lock(&kfd_mem_limit.mem_limit_lock);
155         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
156                 kfd_mem_limit.system_mem_used -= (acc_size + size);
157         } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
158                 kfd_mem_limit.system_mem_used -= acc_size;
159                 kfd_mem_limit.userptr_mem_used -= size;
160         }
161         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
162                   "kfd system memory accounting unbalanced");
163         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
164                   "kfd userptr memory accounting unbalanced");
165
166         spin_unlock(&kfd_mem_limit.mem_limit_lock);
167 }
168
169 void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
170 {
171         spin_lock(&kfd_mem_limit.mem_limit_lock);
172
173         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
174                 kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
175                 kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
176         } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
177                 kfd_mem_limit.system_mem_used -=
178                         (bo->tbo.acc_size + amdgpu_bo_size(bo));
179         }
180         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
181                   "kfd system memory accounting unbalanced");
182         WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
183                   "kfd userptr memory accounting unbalanced");
184
185         spin_unlock(&kfd_mem_limit.mem_limit_lock);
186 }
187
188
189 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's
190  *  reservation object.
191  *
192  * @bo: [IN] Remove eviction fence(s) from this BO
193  * @ef: [IN] If ef is specified, then this eviction fence is removed if it
194  *  is present in the shared list.
195  * @ef_list: [OUT] Returns list of eviction fences. These fences are removed
196  *  from BO's reservation object shared list.
197  * @ef_count: [OUT] Number of fences in ef_list.
198  *
199  * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be
200  *  called to restore the eviction fences and to avoid a memory leak. This is
201  *  useful for shared BOs.
202  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
203  */
204 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
205                                         struct amdgpu_amdkfd_fence *ef,
206                                         struct amdgpu_amdkfd_fence ***ef_list,
207                                         unsigned int *ef_count)
208 {
209         struct reservation_object_list *fobj;
210         struct reservation_object *resv;
211         unsigned int i = 0, j = 0, k = 0, shared_count;
212         unsigned int count = 0;
213         struct amdgpu_amdkfd_fence **fence_list;
214
215         if (!ef && !ef_list)
216                 return -EINVAL;
217
218         if (ef_list) {
219                 *ef_list = NULL;
220                 *ef_count = 0;
221         }
222
223         resv = bo->tbo.resv;
224         fobj = reservation_object_get_list(resv);
225
226         if (!fobj)
227                 return 0;
228
229         preempt_disable();
230         write_seqcount_begin(&resv->seq);
231
232         /* Go through all the shared fences in the reservation object. If
233          * ef is specified and it exists in the list, remove it and reduce the
234          * count. If ef is not specified, then get the count of eviction fences
235          * present.
236          */
237         shared_count = fobj->shared_count;
238         for (i = 0; i < shared_count; ++i) {
239                 struct dma_fence *f;
240
241                 f = rcu_dereference_protected(fobj->shared[i],
242                                               reservation_object_held(resv));
243
244                 if (ef) {
245                         if (f->context == ef->base.context) {
246                                 dma_fence_put(f);
247                                 fobj->shared_count--;
248                         } else {
249                                 RCU_INIT_POINTER(fobj->shared[j++], f);
250                         }
251                 } else if (to_amdgpu_amdkfd_fence(f))
252                         count++;
253         }
254         write_seqcount_end(&resv->seq);
255         preempt_enable();
256
257         if (ef || !count)
258                 return 0;
259
260         /* Allocate memory for 'count' eviction fence pointers. Fill the
261          * ef_list array and ef_count
262          */
263         fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
264                              GFP_KERNEL);
265         if (!fence_list)
266                 return -ENOMEM;
267
268         preempt_disable();
269         write_seqcount_begin(&resv->seq);
270
271         j = 0;
272         for (i = 0; i < shared_count; ++i) {
273                 struct dma_fence *f;
274                 struct amdgpu_amdkfd_fence *efence;
275
276                 f = rcu_dereference_protected(fobj->shared[i],
277                         reservation_object_held(resv));
278
279                 efence = to_amdgpu_amdkfd_fence(f);
280                 if (efence) {
281                         fence_list[k++] = efence;
282                         fobj->shared_count--;
283                 } else {
284                         RCU_INIT_POINTER(fobj->shared[j++], f);
285                 }
286         }
287
288         write_seqcount_end(&resv->seq);
289         preempt_enable();
290
291         *ef_list = fence_list;
292         *ef_count = k;
293
294         return 0;
295 }
296
297 /* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's
298  *  reservation object.
299  *
300  * @bo: [IN] Add eviction fences to this BO
301  * @ef_list: [IN] List of eviction fences to be added
302  * @ef_count: [IN] Number of fences in ef_list.
303  *
304  * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this
305  *  function.
306  */
307 static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo,
308                                 struct amdgpu_amdkfd_fence **ef_list,
309                                 unsigned int ef_count)
310 {
311         int i;
312
313         if (!ef_list || !ef_count)
314                 return;
315
316         for (i = 0; i < ef_count; i++) {
317                 amdgpu_bo_fence(bo, &ef_list[i]->base, true);
318                 /* Re-adding the fence takes an additional reference. Drop that
319                  * reference.
320                  */
321                 dma_fence_put(&ef_list[i]->base);
322         }
323
324         kfree(ef_list);
325 }
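/* Typical pairing of the two helpers above, as used by
 * amdgpu_amdkfd_bo_validate() below (sketch only, error handling
 * omitted):
 *
 *   amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list, &ef_count);
 *   ttm_bo_wait(&bo->tbo, false, false);
 *   amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
 *
 * Removing the eviction fences first lets the wait complete without
 * triggering a KFD eviction; adding them back restores the original
 * shared fence list.
 */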
326
327 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
328                                      bool wait)
329 {
330         struct ttm_operation_ctx ctx = { false, false };
331         int ret;
332
333         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
334                  "Called with userptr BO"))
335                 return -EINVAL;
336
337         amdgpu_bo_placement_from_domain(bo, domain);
338
339         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
340         if (ret)
341                 goto validate_fail;
342         if (wait) {
343                 struct amdgpu_amdkfd_fence **ef_list;
344                 unsigned int ef_count;
345
346                 ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list,
347                                                           &ef_count);
348                 if (ret)
349                         goto validate_fail;
350
351                 ttm_bo_wait(&bo->tbo, false, false);
352                 amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count);
353         }
354
355 validate_fail:
356         return ret;
357 }
358
359 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
360 {
361         struct amdgpu_vm_parser *p = param;
362
363         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
364 }
365
366 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
367  *
368  * Page directories are not updated here because huge page handling
369  * during page table updates can invalidate page directory entries
370  * again. Page directories are only updated after updating page
371  * tables.
372  */
373 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
374 {
375         struct amdgpu_bo *pd = vm->root.base.bo;
376         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
377         struct amdgpu_vm_parser param;
378         uint64_t addr, flags = AMDGPU_PTE_VALID;
379         int ret;
380
381         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
382         param.wait = false;
383
384         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
385                                         &param);
386         if (ret) {
387                 pr_err("amdgpu: failed to validate PT BOs\n");
388                 return ret;
389         }
390
391         ret = amdgpu_amdkfd_validate(&param, pd);
392         if (ret) {
393                 pr_err("amdgpu: failed to validate PD\n");
394                 return ret;
395         }
396
397         addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
398         amdgpu_gmc_get_vm_pde(adev, -1, &addr, &flags);
399         vm->pd_phys_addr = addr;
400
401         if (vm->use_cpu_for_update) {
402                 ret = amdgpu_bo_kmap(pd, NULL);
403                 if (ret) {
404                         pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
405                         return ret;
406                 }
407         }
408
409         return 0;
410 }
411
412 static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
413                          struct dma_fence *f)
414 {
415         int ret = amdgpu_sync_fence(adev, sync, f, false);
416
417         /* Sync objects can't handle multiple GPUs (contexts) updating
418          * sync->last_vm_update. Fortunately we don't need it for
419          * KFD's purposes, so we can just drop that fence.
420          */
421         if (sync->last_vm_update) {
422                 dma_fence_put(sync->last_vm_update);
423                 sync->last_vm_update = NULL;
424         }
425
426         return ret;
427 }
428
429 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
430 {
431         struct amdgpu_bo *pd = vm->root.base.bo;
432         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
433         int ret;
434
435         ret = amdgpu_vm_update_directories(adev, vm);
436         if (ret)
437                 return ret;
438
439         return sync_vm_fence(adev, sync, vm->last_update);
440 }
441
442 /* add_bo_to_vm - Add a BO to a VM
443  *
444  * Everything that needs to be done only once when a BO is first added
445  * to a VM. It can later be mapped and unmapped many times without
446  * repeating these steps.
447  *
448  * 1. Allocate and initialize BO VA entry data structure
449  * 2. Add BO to the VM
450  * 3. Determine ASIC-specific PTE flags
451  * 4. Alloc page tables and directories if needed
452  * 4a.  Validate new page tables and directories
453  */
454 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
455                 struct amdgpu_vm *vm, bool is_aql,
456                 struct kfd_bo_va_list **p_bo_va_entry)
457 {
458         int ret;
459         struct kfd_bo_va_list *bo_va_entry;
460         struct amdgpu_bo *pd = vm->root.base.bo;
461         struct amdgpu_bo *bo = mem->bo;
462         uint64_t va = mem->va;
463         struct list_head *list_bo_va = &mem->bo_va_list;
464         unsigned long bo_size = bo->tbo.mem.size;
465
466         if (!va) {
467                 pr_err("Invalid VA when adding BO to VM\n");
468                 return -EINVAL;
469         }
470
471         if (is_aql)
472                 va += bo_size;
473
474         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
475         if (!bo_va_entry)
476                 return -ENOMEM;
477
478         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
479                         va + bo_size, vm);
480
481         /* Add BO to VM internal data structures */
482         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
483         if (!bo_va_entry->bo_va) {
484                 ret = -EINVAL;
485                 pr_err("Failed to add BO object to VM. ret == %d\n",
486                                 ret);
487                 goto err_vmadd;
488         }
489
490         bo_va_entry->va = va;
491         bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
492                                                          mem->mapping_flags);
493         bo_va_entry->kgd_dev = (void *)adev;
494         list_add(&bo_va_entry->bo_list, list_bo_va);
495
496         if (p_bo_va_entry)
497                 *p_bo_va_entry = bo_va_entry;
498
499         /* Allocate new page tables if needed and validate
500          * them. Clearing of new page tables and validation need to wait
501          * on move fences. We don't want that to trigger the eviction
502          * fence, so remove it temporarily.
503          */
504         amdgpu_amdkfd_remove_eviction_fence(pd,
505                                         vm->process_info->eviction_fence,
506                                         NULL, NULL);
507
508         ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
509         if (ret) {
510                 pr_err("Failed to allocate pts, err=%d\n", ret);
511                 goto err_alloc_pts;
512         }
513
514         ret = vm_validate_pt_pd_bos(vm);
515         if (ret) {
516                 pr_err("validate_pt_pd_bos() failed\n");
517                 goto err_alloc_pts;
518         }
519
520         /* Add the eviction fence back */
521         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
522
523         return 0;
524
525 err_alloc_pts:
526         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
527         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
528         list_del(&bo_va_entry->bo_list);
529 err_vmadd:
530         kfree(bo_va_entry);
531         return ret;
532 }
533
534 static void remove_bo_from_vm(struct amdgpu_device *adev,
535                 struct kfd_bo_va_list *entry, unsigned long size)
536 {
537         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
538                         entry->va,
539                         entry->va + size, entry);
540         amdgpu_vm_bo_rmv(adev, entry->bo_va);
541         list_del(&entry->bo_list);
542         kfree(entry);
543 }
544
545 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
546                                 struct amdkfd_process_info *process_info,
547                                 bool userptr)
548 {
549         struct ttm_validate_buffer *entry = &mem->validate_list;
550         struct amdgpu_bo *bo = mem->bo;
551
552         INIT_LIST_HEAD(&entry->head);
553         entry->shared = true;
554         entry->bo = &bo->tbo;
555         mutex_lock(&process_info->lock);
556         if (userptr)
557                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
558         else
559                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
560         mutex_unlock(&process_info->lock);
561 }
562
563 /* Initializes user pages. It registers the MMU notifier and validates
564  * the userptr BO in the GTT domain.
565  *
566  * The BO must already be on the userptr_valid_list. Otherwise an
567  * eviction and restore may happen that leaves the new BO unmapped
568  * with the user mode queues running.
569  *
570  * Takes the process_info->lock to protect against concurrent restore
571  * workers.
572  *
573  * Returns 0 for success, negative errno for errors.
574  */
575 static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
576                            uint64_t user_addr)
577 {
578         struct amdkfd_process_info *process_info = mem->process_info;
579         struct amdgpu_bo *bo = mem->bo;
580         struct ttm_operation_ctx ctx = { true, false };
581         int ret = 0;
582
583         mutex_lock(&process_info->lock);
584
585         ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
586         if (ret) {
587                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
588                 goto out;
589         }
590
591         ret = amdgpu_mn_register(bo, user_addr);
592         if (ret) {
593                 pr_err("%s: Failed to register MMU notifier: %d\n",
594                        __func__, ret);
595                 goto out;
596         }
597
598         /* If no restore worker is running concurrently, user_pages
599          * should not be allocated
600          */
601         WARN(mem->user_pages, "Leaking user_pages array");
602
603         mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
604                                            sizeof(struct page *),
605                                            GFP_KERNEL | __GFP_ZERO);
606         if (!mem->user_pages) {
607                 pr_err("%s: Failed to allocate pages array\n", __func__);
608                 ret = -ENOMEM;
609                 goto unregister_out;
610         }
611
612         ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
613         if (ret) {
614                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
615                 goto free_out;
616         }
617
618         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
619
620         ret = amdgpu_bo_reserve(bo, true);
621         if (ret) {
622                 pr_err("%s: Failed to reserve BO\n", __func__);
623                 goto release_out;
624         }
625         amdgpu_bo_placement_from_domain(bo, mem->domain);
626         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
627         if (ret)
628                 pr_err("%s: failed to validate BO\n", __func__);
629         amdgpu_bo_unreserve(bo);
630
631 release_out:
632         if (ret)
633                 release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
634 free_out:
635         kvfree(mem->user_pages);
636         mem->user_pages = NULL;
637 unregister_out:
638         if (ret)
639                 amdgpu_mn_unregister(bo);
640 out:
641         mutex_unlock(&process_info->lock);
642         return ret;
643 }
644
645 /* Reserving a BO and its page table BOs must happen atomically to
646  * avoid deadlocks. Some operations update multiple VMs at once. Track
647  * all the reservation info in a context structure. Optionally a sync
648  * object can track VM updates.
649  */
650 struct bo_vm_reservation_context {
651         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
652         unsigned int n_vms;                 /* Number of VMs reserved       */
653         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
654         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
655         struct list_head list, duplicates;  /* BO lists                     */
656         struct amdgpu_sync *sync;           /* Pointer to sync object       */
657         bool reserved;                      /* Whether BOs are reserved     */
658 };
659
660 enum bo_vm_match {
661         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
662         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
663         BO_VM_ALL,              /* Match all VMs a BO was added to    */
664 };
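/* Sketch of the reservation-context life cycle implemented by the
 * helpers that follow, based on how they are used later in this file:
 *
 *   struct bo_vm_reservation_context ctx;
 *
 *   ret = reserve_bo_and_vm(mem, vm, &ctx);   (or reserve_bo_and_cond_vms())
 *   if (ret)
 *           goto out;
 *   ... validate / map / unmap, accumulating fences in ctx.sync ...
 *   unreserve_bo_and_vms(&ctx, false, false); (backs off and frees ctx.vm_pd)
 *
 * The context owns the ww_acquire ticket and the temporary vm_pd array,
 * so every successful reserve must be paired with an unreserve.
 */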
665
666 /**
667  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
668  * @mem: KFD BO structure.
669  * @vm: the VM to reserve.
670  * @ctx: the struct that will be used in unreserve_bo_and_vms().
671  */
672 static int reserve_bo_and_vm(struct kgd_mem *mem,
673                               struct amdgpu_vm *vm,
674                               struct bo_vm_reservation_context *ctx)
675 {
676         struct amdgpu_bo *bo = mem->bo;
677         int ret;
678
679         WARN_ON(!vm);
680
681         ctx->reserved = false;
682         ctx->n_vms = 1;
683         ctx->sync = &mem->sync;
684
685         INIT_LIST_HEAD(&ctx->list);
686         INIT_LIST_HEAD(&ctx->duplicates);
687
688         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
689         if (!ctx->vm_pd)
690                 return -ENOMEM;
691
692         ctx->kfd_bo.robj = bo;
693         ctx->kfd_bo.priority = 0;
694         ctx->kfd_bo.tv.bo = &bo->tbo;
695         ctx->kfd_bo.tv.shared = true;
696         ctx->kfd_bo.user_pages = NULL;
697         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
698
699         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
700
701         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
702                                      false, &ctx->duplicates);
703         if (!ret)
704                 ctx->reserved = true;
705         else {
706                 pr_err("Failed to reserve buffers in ttm\n");
707                 kfree(ctx->vm_pd);
708                 ctx->vm_pd = NULL;
709         }
710
711         return ret;
712 }
713
714 /**
715  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
716  * @mem: KFD BO structure.
717  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
718  * are used. Otherwise, only the given VM is used.
719  * @map_type: the mapping status that will be used to filter the VMs.
720  * @ctx: the struct that will be used in unreserve_bo_and_vms().
721  *
722  * Returns 0 for success, negative for failure.
723  */
724 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
725                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
726                                 struct bo_vm_reservation_context *ctx)
727 {
728         struct amdgpu_bo *bo = mem->bo;
729         struct kfd_bo_va_list *entry;
730         unsigned int i;
731         int ret;
732
733         ctx->reserved = false;
734         ctx->n_vms = 0;
735         ctx->vm_pd = NULL;
736         ctx->sync = &mem->sync;
737
738         INIT_LIST_HEAD(&ctx->list);
739         INIT_LIST_HEAD(&ctx->duplicates);
740
741         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
742                 if ((vm && vm != entry->bo_va->base.vm) ||
743                         (entry->is_mapped != map_type
744                         && map_type != BO_VM_ALL))
745                         continue;
746
747                 ctx->n_vms++;
748         }
749
750         if (ctx->n_vms != 0) {
751                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
752                                      GFP_KERNEL);
753                 if (!ctx->vm_pd)
754                         return -ENOMEM;
755         }
756
757         ctx->kfd_bo.robj = bo;
758         ctx->kfd_bo.priority = 0;
759         ctx->kfd_bo.tv.bo = &bo->tbo;
760         ctx->kfd_bo.tv.shared = true;
761         ctx->kfd_bo.user_pages = NULL;
762         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
763
764         i = 0;
765         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
766                 if ((vm && vm != entry->bo_va->base.vm) ||
767                         (entry->is_mapped != map_type
768                         && map_type != BO_VM_ALL))
769                         continue;
770
771                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
772                                 &ctx->vm_pd[i]);
773                 i++;
774         }
775
776         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
777                                      false, &ctx->duplicates);
778         if (!ret)
779                 ctx->reserved = true;
780         else
781                 pr_err("Failed to reserve buffers in ttm.\n");
782
783         if (ret) {
784                 kfree(ctx->vm_pd);
785                 ctx->vm_pd = NULL;
786         }
787
788         return ret;
789 }
790
791 /**
792  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
793  * @ctx: Reservation context to unreserve
794  * @wait: Optionally wait for a sync object representing pending VM updates
795  * @intr: Whether the wait is interruptible
796  *
797  * Also frees any resources allocated in
798  * reserve_bo_and_(cond_)vm(s). Returns the status from
799  * amdgpu_sync_wait.
800  */
801 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
802                                  bool wait, bool intr)
803 {
804         int ret = 0;
805
806         if (wait)
807                 ret = amdgpu_sync_wait(ctx->sync, intr);
808
809         if (ctx->reserved)
810                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
811         kfree(ctx->vm_pd);
812
813         ctx->sync = NULL;
814
815         ctx->reserved = false;
816         ctx->vm_pd = NULL;
817
818         return ret;
819 }
820
821 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
822                                 struct kfd_bo_va_list *entry,
823                                 struct amdgpu_sync *sync)
824 {
825         struct amdgpu_bo_va *bo_va = entry->bo_va;
826         struct amdgpu_vm *vm = bo_va->base.vm;
827         struct amdgpu_bo *pd = vm->root.base.bo;
828
829         /* Remove eviction fence from PD (and thereby from PTs too as
830          * they share the resv. object). Otherwise during PT update
831          * job (see amdgpu_vm_bo_update_mapping), eviction fence would
832          * get added to job->sync object and job execution would
833          * trigger the eviction fence.
834          */
835         amdgpu_amdkfd_remove_eviction_fence(pd,
836                                             vm->process_info->eviction_fence,
837                                             NULL, NULL);
838         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
839
840         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
841
842         /* Add the eviction fence back */
843         amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
844
845         sync_vm_fence(adev, sync, bo_va->last_pt_update);
846
847         return 0;
848 }
849
850 static int update_gpuvm_pte(struct amdgpu_device *adev,
851                 struct kfd_bo_va_list *entry,
852                 struct amdgpu_sync *sync)
853 {
854         int ret;
855         struct amdgpu_vm *vm;
856         struct amdgpu_bo_va *bo_va;
857         struct amdgpu_bo *bo;
858
859         bo_va = entry->bo_va;
860         vm = bo_va->base.vm;
861         bo = bo_va->base.bo;
862
863         /* Update the page tables  */
864         ret = amdgpu_vm_bo_update(adev, bo_va, false);
865         if (ret) {
866                 pr_err("amdgpu_vm_bo_update failed\n");
867                 return ret;
868         }
869
870         return sync_vm_fence(adev, sync, bo_va->last_pt_update);
871 }
872
873 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
874                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
875                 bool no_update_pte)
876 {
877         int ret;
878
879         /* Set virtual address for the allocation */
880         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
881                                amdgpu_bo_size(entry->bo_va->base.bo),
882                                entry->pte_flags);
883         if (ret) {
884                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
885                                 entry->va, ret);
886                 return ret;
887         }
888
889         if (no_update_pte)
890                 return 0;
891
892         ret = update_gpuvm_pte(adev, entry, sync);
893         if (ret) {
894                 pr_err("update_gpuvm_pte() failed\n");
895                 goto update_gpuvm_pte_failed;
896         }
897
898         return 0;
899
900 update_gpuvm_pte_failed:
901         unmap_bo_from_gpuvm(adev, entry, sync);
902         return ret;
903 }
904
905 static int process_validate_vms(struct amdkfd_process_info *process_info)
906 {
907         struct amdgpu_vm *peer_vm;
908         int ret;
909
910         list_for_each_entry(peer_vm, &process_info->vm_list_head,
911                             vm_list_node) {
912                 ret = vm_validate_pt_pd_bos(peer_vm);
913                 if (ret)
914                         return ret;
915         }
916
917         return 0;
918 }
919
920 static int process_update_pds(struct amdkfd_process_info *process_info,
921                               struct amdgpu_sync *sync)
922 {
923         struct amdgpu_vm *peer_vm;
924         int ret;
925
926         list_for_each_entry(peer_vm, &process_info->vm_list_head,
927                             vm_list_node) {
928                 ret = vm_update_pds(peer_vm, sync);
929                 if (ret)
930                         return ret;
931         }
932
933         return 0;
934 }
935
936 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
937                        struct dma_fence **ef)
938 {
939         struct amdkfd_process_info *info = NULL;
940         int ret;
941
942         if (!*process_info) {
943                 info = kzalloc(sizeof(*info), GFP_KERNEL);
944                 if (!info)
945                         return -ENOMEM;
946
947                 mutex_init(&info->lock);
948                 INIT_LIST_HEAD(&info->vm_list_head);
949                 INIT_LIST_HEAD(&info->kfd_bo_list);
950                 INIT_LIST_HEAD(&info->userptr_valid_list);
951                 INIT_LIST_HEAD(&info->userptr_inval_list);
952
953                 info->eviction_fence =
954                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
955                                                    current->mm);
956                 if (!info->eviction_fence) {
957                         pr_err("Failed to create eviction fence\n");
958                         ret = -ENOMEM;
959                         goto create_evict_fence_fail;
960                 }
961
962                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
963                 atomic_set(&info->evicted_bos, 0);
964                 INIT_DELAYED_WORK(&info->restore_userptr_work,
965                                   amdgpu_amdkfd_restore_userptr_worker);
966
967                 *process_info = info;
968                 *ef = dma_fence_get(&info->eviction_fence->base);
969         }
970
971         vm->process_info = *process_info;
972
973         /* Validate page directory and attach eviction fence */
974         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
975         if (ret)
976                 goto reserve_pd_fail;
977         ret = vm_validate_pt_pd_bos(vm);
978         if (ret) {
979                 pr_err("validate_pt_pd_bos() failed\n");
980                 goto validate_pd_fail;
981         }
982         ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false);
983         if (ret)
984                 goto wait_pd_fail;
985         amdgpu_bo_fence(vm->root.base.bo,
986                         &vm->process_info->eviction_fence->base, true);
987         amdgpu_bo_unreserve(vm->root.base.bo);
988
989         /* Update process info */
990         mutex_lock(&vm->process_info->lock);
991         list_add_tail(&vm->vm_list_node,
992                         &(vm->process_info->vm_list_head));
993         vm->process_info->n_vms++;
994         mutex_unlock(&vm->process_info->lock);
995
996         return 0;
997
998 wait_pd_fail:
999 validate_pd_fail:
1000         amdgpu_bo_unreserve(vm->root.base.bo);
1001 reserve_pd_fail:
1002         vm->process_info = NULL;
1003         if (info) {
1004                 /* Two fence references: one in info and one in *ef */
1005                 dma_fence_put(&info->eviction_fence->base);
1006                 dma_fence_put(*ef);
1007                 *ef = NULL;
1008                 *process_info = NULL;
1009                 put_pid(info->pid);
1010 create_evict_fence_fail:
1011                 mutex_destroy(&info->lock);
1012                 kfree(info);
1013         }
1014         return ret;
1015 }
1016
1017 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm,
1018                                           void **process_info,
1019                                           struct dma_fence **ef)
1020 {
1021         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1022         struct amdgpu_vm *new_vm;
1023         int ret;
1024
1025         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1026         if (!new_vm)
1027                 return -ENOMEM;
1028
1029         /* Initialize AMDGPU part of the VM */
1030         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, 0);
1031         if (ret) {
1032                 pr_err("Failed init vm ret %d\n", ret);
1033                 goto amdgpu_vm_init_fail;
1034         }
1035
1036         /* Initialize KFD part of the VM and process info */
1037         ret = init_kfd_vm(new_vm, process_info, ef);
1038         if (ret)
1039                 goto init_kfd_vm_fail;
1040
1041         *vm = (void *) new_vm;
1042
1043         return 0;
1044
1045 init_kfd_vm_fail:
1046         amdgpu_vm_fini(adev, new_vm);
1047 amdgpu_vm_init_fail:
1048         kfree(new_vm);
1049         return ret;
1050 }
1051
1052 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1053                                            struct file *filp,
1054                                            void **vm, void **process_info,
1055                                            struct dma_fence **ef)
1056 {
1057         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1058         struct drm_file *drm_priv = filp->private_data;
1059         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1060         struct amdgpu_vm *avm = &drv_priv->vm;
1061         int ret;
1062
1063         /* Already a compute VM? */
1064         if (avm->process_info)
1065                 return -EINVAL;
1066
1067         /* Convert VM into a compute VM */
1068         ret = amdgpu_vm_make_compute(adev, avm);
1069         if (ret)
1070                 return ret;
1071
1072         /* Initialize KFD part of the VM and process info */
1073         ret = init_kfd_vm(avm, process_info, ef);
1074         if (ret)
1075                 return ret;
1076
1077         *vm = (void *)avm;
1078
1079         return 0;
1080 }
1081
1082 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1083                                     struct amdgpu_vm *vm)
1084 {
1085         struct amdkfd_process_info *process_info = vm->process_info;
1086         struct amdgpu_bo *pd = vm->root.base.bo;
1087
1088         if (!process_info)
1089                 return;
1090
1091         /* Release eviction fence from PD */
1092         amdgpu_bo_reserve(pd, false);
1093         amdgpu_bo_fence(pd, NULL, false);
1094         amdgpu_bo_unreserve(pd);
1095
1096         /* Update process info */
1097         mutex_lock(&process_info->lock);
1098         process_info->n_vms--;
1099         list_del(&vm->vm_list_node);
1100         mutex_unlock(&process_info->lock);
1101
1102         /* Release per-process resources when last compute VM is destroyed */
1103         if (!process_info->n_vms) {
1104                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1105                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1106                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1107
1108                 dma_fence_put(&process_info->eviction_fence->base);
1109                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1110                 put_pid(process_info->pid);
1111                 mutex_destroy(&process_info->lock);
1112                 kfree(process_info);
1113         }
1114 }
1115
1116 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1117 {
1118         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1119         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1120
1121         if (WARN_ON(!kgd || !vm))
1122                 return;
1123
1124         pr_debug("Destroying process vm %p\n", vm);
1125
1126         /* Release the VM context */
1127         amdgpu_vm_fini(adev, avm);
1128         kfree(vm);
1129 }
1130
1131 uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1132 {
1133         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1134
1135         return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1136 }
1137
1138 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1139                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1140                 void *vm, struct kgd_mem **mem,
1141                 uint64_t *offset, uint32_t flags)
1142 {
1143         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1144         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1145         uint64_t user_addr = 0;
1146         struct amdgpu_bo *bo;
1147         struct amdgpu_bo_param bp;
1148         int byte_align;
1149         u32 domain, alloc_domain;
1150         u64 alloc_flags;
1151         uint32_t mapping_flags;
1152         int ret;
1153
1154         /*
1155          * Check on which domain to allocate BO
1156          */
1157         if (flags & ALLOC_MEM_FLAGS_VRAM) {
1158                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1159                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
1160                 alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
1161                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1162                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1163         } else if (flags & ALLOC_MEM_FLAGS_GTT) {
1164                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1165                 alloc_flags = 0;
1166         } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
1167                 domain = AMDGPU_GEM_DOMAIN_GTT;
1168                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1169                 alloc_flags = 0;
1170                 if (!offset || !*offset)
1171                         return -EINVAL;
1172                 user_addr = *offset;
1173         } else {
1174                 return -EINVAL;
1175         }
1176
1177         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1178         if (!*mem)
1179                 return -ENOMEM;
1180         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1181         mutex_init(&(*mem)->lock);
1182         (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1183
1184         /* Workaround for AQL queue wraparound bug. Map the same
1185          * memory twice. That means we only actually allocate half
1186          * the memory.
1187          */
1188         if ((*mem)->aql_queue)
1189                 size = size >> 1;
1190
1191         /* Workaround for TLB bug on older VI chips */
1192         byte_align = (adev->family == AMDGPU_FAMILY_VI &&
1193                         adev->asic_type != CHIP_FIJI &&
1194                         adev->asic_type != CHIP_POLARIS10 &&
1195                         adev->asic_type != CHIP_POLARIS11) ?
1196                         VI_BO_SIZE_ALIGN : 1;
1197
1198         mapping_flags = AMDGPU_VM_PAGE_READABLE;
1199         if (flags & ALLOC_MEM_FLAGS_WRITABLE)
1200                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
1201         if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
1202                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1203         if (flags & ALLOC_MEM_FLAGS_COHERENT)
1204                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
1205         else
1206                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
1207         (*mem)->mapping_flags = mapping_flags;
1208
1209         amdgpu_sync_create(&(*mem)->sync);
1210
1211         ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
1212         if (ret) {
1213                 pr_debug("Insufficient system memory\n");
1214                 goto err_reserve_system_mem;
1215         }
1216
1217         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1218                         va, size, domain_string(alloc_domain));
1219
1220         memset(&bp, 0, sizeof(bp));
1221         bp.size = size;
1222         bp.byte_align = byte_align;
1223         bp.domain = alloc_domain;
1224         bp.flags = alloc_flags;
1225         bp.type = ttm_bo_type_device;
1226         bp.resv = NULL;
1227         ret = amdgpu_bo_create(adev, &bp, &bo);
1228         if (ret) {
1229                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1230                                 domain_string(alloc_domain), ret);
1231                 goto err_bo_create;
1232         }
1233         bo->kfd_bo = *mem;
1234         (*mem)->bo = bo;
1235         if (user_addr)
1236                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1237
1238         (*mem)->va = va;
1239         (*mem)->domain = domain;
1240         (*mem)->mapped_to_gpu_memory = 0;
1241         (*mem)->process_info = avm->process_info;
1242         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1243
1244         if (user_addr) {
1245                 ret = init_user_pages(*mem, current->mm, user_addr);
1246                 if (ret) {
1247                         mutex_lock(&avm->process_info->lock);
1248                         list_del(&(*mem)->validate_list.head);
1249                         mutex_unlock(&avm->process_info->lock);
1250                         goto allocate_init_user_pages_failed;
1251                 }
1252         }
1253
1254         if (offset)
1255                 *offset = amdgpu_bo_mmap_offset(bo);
1256
1257         return 0;
1258
1259 allocate_init_user_pages_failed:
1260         amdgpu_bo_unref(&bo);
1261         /* Don't unreserve system mem limit twice */
1262         goto err_reserve_system_mem;
1263 err_bo_create:
1264         unreserve_system_mem_limit(adev, size, alloc_domain);
1265 err_reserve_system_mem:
1266         mutex_destroy(&(*mem)->lock);
1267         kfree(*mem);
1268         return ret;
1269 }
1270
1271 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1272                 struct kgd_dev *kgd, struct kgd_mem *mem)
1273 {
1274         struct amdkfd_process_info *process_info = mem->process_info;
1275         unsigned long bo_size = mem->bo->tbo.mem.size;
1276         struct kfd_bo_va_list *entry, *tmp;
1277         struct bo_vm_reservation_context ctx;
1278         struct ttm_validate_buffer *bo_list_entry;
1279         int ret;
1280
1281         mutex_lock(&mem->lock);
1282
1283         if (mem->mapped_to_gpu_memory > 0) {
1284                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1285                                 mem->va, bo_size);
1286                 mutex_unlock(&mem->lock);
1287                 return -EBUSY;
1288         }
1289
1290         mutex_unlock(&mem->lock);
1291         /* lock is not needed after this, since mem is unused and will
1292          * be freed anyway
1293          */
1294
1295         /* No more MMU notifiers */
1296         amdgpu_mn_unregister(mem->bo);
1297
1298         /* Make sure restore workers don't access the BO any more */
1299         bo_list_entry = &mem->validate_list;
1300         mutex_lock(&process_info->lock);
1301         list_del(&bo_list_entry->head);
1302         mutex_unlock(&process_info->lock);
1303
1304         /* Free user pages if necessary */
1305         if (mem->user_pages) {
1306                 pr_debug("%s: Freeing user_pages array\n", __func__);
1307                 if (mem->user_pages[0])
1308                         release_pages(mem->user_pages,
1309                                         mem->bo->tbo.ttm->num_pages);
1310                 kvfree(mem->user_pages);
1311         }
1312
1313         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1314         if (unlikely(ret))
1315                 return ret;
1316
1317         /* The eviction fence should be removed by the last unmap.
1318          * TODO: Log an error condition if the bo still has the eviction fence
1319          * attached
1320          */
1321         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1322                                         process_info->eviction_fence,
1323                                         NULL, NULL);
1324         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1325                 mem->va + bo_size * (1 + mem->aql_queue));
1326
1327         /* Remove from VM internal data structures */
1328         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1329                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1330                                 entry, bo_size);
1331
1332         ret = unreserve_bo_and_vms(&ctx, false, false);
1333
1334         /* Free the sync object */
1335         amdgpu_sync_free(&mem->sync);
1336
1337         /* Free the BO */
1338         amdgpu_bo_unref(&mem->bo);
1339         mutex_destroy(&mem->lock);
1340         kfree(mem);
1341
1342         return ret;
1343 }
1344
1345 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1346                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1347 {
1348         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1349         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1350         int ret;
1351         struct amdgpu_bo *bo;
1352         uint32_t domain;
1353         struct kfd_bo_va_list *entry;
1354         struct bo_vm_reservation_context ctx;
1355         struct kfd_bo_va_list *bo_va_entry = NULL;
1356         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1357         unsigned long bo_size;
1358         bool is_invalid_userptr = false;
1359
1360         bo = mem->bo;
1361         if (!bo) {
1362                 pr_err("Invalid BO when mapping memory to GPU\n");
1363                 return -EINVAL;
1364         }
1365
1366         /* Make sure restore is not running concurrently. Since we
1367          * don't map invalid userptr BOs, we rely on the next restore
1368          * worker to do the mapping
1369          */
1370         mutex_lock(&mem->process_info->lock);
1371
1372         /* Lock mmap-sem. If we find an invalid userptr BO, we can be
1373          * sure that the MMU notifier is no longer running
1374          * concurrently and the queues are actually stopped
1375          */
1376         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1377                 down_write(&current->mm->mmap_sem);
1378                 is_invalid_userptr = atomic_read(&mem->invalid);
1379                 up_write(&current->mm->mmap_sem);
1380         }
1381
1382         mutex_lock(&mem->lock);
1383
1384         domain = mem->domain;
1385         bo_size = bo->tbo.mem.size;
1386
1387         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1388                         mem->va,
1389                         mem->va + bo_size * (1 + mem->aql_queue),
1390                         vm, domain_string(domain));
1391
1392         ret = reserve_bo_and_vm(mem, vm, &ctx);
1393         if (unlikely(ret))
1394                 goto out;
1395
1396         /* Userptr can be marked as "not invalid", but not actually be
1397          * validated yet (still in the system domain). In that case
1398          * the queues are still stopped and we can leave mapping for
1399          * the next restore worker
1400          */
1401         if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1402                 is_invalid_userptr = true;
1403
1404         if (check_if_add_bo_to_vm(avm, mem)) {
1405                 ret = add_bo_to_vm(adev, mem, avm, false,
1406                                 &bo_va_entry);
1407                 if (ret)
1408                         goto add_bo_to_vm_failed;
1409                 if (mem->aql_queue) {
1410                         ret = add_bo_to_vm(adev, mem, avm,
1411                                         true, &bo_va_entry_aql);
1412                         if (ret)
1413                                 goto add_bo_to_vm_failed_aql;
1414                 }
1415         } else {
1416                 ret = vm_validate_pt_pd_bos(avm);
1417                 if (unlikely(ret))
1418                         goto add_bo_to_vm_failed;
1419         }
1420
1421         if (mem->mapped_to_gpu_memory == 0 &&
1422             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1423                 /* Validate BO only once. The eviction fence gets added to BO
1424                  * the first time it is mapped. Validate will wait for all
1425                  * background evictions to complete.
1426                  */
1427                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1428                 if (ret) {
1429                         pr_debug("Validate failed\n");
1430                         goto map_bo_to_gpuvm_failed;
1431                 }
1432         }
1433
1434         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1435                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1436                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1437                                         entry->va, entry->va + bo_size,
1438                                         entry);
1439
1440                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1441                                               is_invalid_userptr);
1442                         if (ret) {
1443                                 pr_err("Failed to map BO to gpuvm\n");
1444                                 goto map_bo_to_gpuvm_failed;
1445                         }
1446
1447                         ret = vm_update_pds(vm, ctx.sync);
1448                         if (ret) {
1449                                 pr_err("Failed to update page directories\n");
1450                                 goto map_bo_to_gpuvm_failed;
1451                         }
1452
1453                         entry->is_mapped = true;
1454                         mem->mapped_to_gpu_memory++;
1455                         pr_debug("\t INC mapping count %d\n",
1456                                         mem->mapped_to_gpu_memory);
1457                 }
1458         }
1459
1460         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
1461                 amdgpu_bo_fence(bo,
1462                                 &avm->process_info->eviction_fence->base,
1463                                 true);
1464         ret = unreserve_bo_and_vms(&ctx, false, false);
1465
1466         goto out;
1467
1468 map_bo_to_gpuvm_failed:
1469         if (bo_va_entry_aql)
1470                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1471 add_bo_to_vm_failed_aql:
1472         if (bo_va_entry)
1473                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1474 add_bo_to_vm_failed:
1475         unreserve_bo_and_vms(&ctx, false, false);
1476 out:
1477         mutex_unlock(&mem->process_info->lock);
1478         mutex_unlock(&mem->lock);
1479         return ret;
1480 }
1481
1482 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1483                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1484 {
1485         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1486         struct amdkfd_process_info *process_info =
1487                 ((struct amdgpu_vm *)vm)->process_info;
1488         unsigned long bo_size = mem->bo->tbo.mem.size;
1489         struct kfd_bo_va_list *entry;
1490         struct bo_vm_reservation_context ctx;
1491         int ret;
1492
1493         mutex_lock(&mem->lock);
1494
1495         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1496         if (unlikely(ret))
1497                 goto out;
1498         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1499         if (ctx.n_vms == 0) {
1500                 ret = -EINVAL;
1501                 goto unreserve_out;
1502         }
1503
1504         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1505         if (unlikely(ret))
1506                 goto unreserve_out;
1507
1508         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1509                 mem->va,
1510                 mem->va + bo_size * (1 + mem->aql_queue),
1511                 vm);
1512
1513         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1514                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1515                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1516                                         entry->va,
1517                                         entry->va + bo_size,
1518                                         entry);
1519
1520                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1521                         if (ret == 0) {
1522                                 entry->is_mapped = false;
1523                         } else {
1524                                 pr_err("failed to unmap VA 0x%llx\n",
1525                                                 mem->va);
1526                                 goto unreserve_out;
1527                         }
1528
1529                         mem->mapped_to_gpu_memory--;
1530                         pr_debug("\t DEC mapping count %d\n",
1531                                         mem->mapped_to_gpu_memory);
1532                 }
1533         }
1534
1535         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1536          * required.
1537          */
1538         if (mem->mapped_to_gpu_memory == 0 &&
1539             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1540                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1541                                                 process_info->eviction_fence,
1542                                                     NULL, NULL);
1543
1544 unreserve_out:
1545         unreserve_bo_and_vms(&ctx, false, false);
1546 out:
1547         mutex_unlock(&mem->lock);
1548         return ret;
1549 }
1550
1551 int amdgpu_amdkfd_gpuvm_sync_memory(
1552                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1553 {
1554         struct amdgpu_sync sync;
1555         int ret;
1556
1557         amdgpu_sync_create(&sync);
1558
1559         mutex_lock(&mem->lock);
1560         amdgpu_sync_clone(&mem->sync, &sync);
1561         mutex_unlock(&mem->lock);
1562
1563         ret = amdgpu_sync_wait(&sync, intr);
1564         amdgpu_sync_free(&sync);
1565         return ret;
1566 }
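
/*
 * A minimal caller sketch (hypothetical helper, not part of this file):
 * after mapping a BO with amdgpu_amdkfd_gpuvm_map_memory_to_gpu(), a KFD
 * ioctl path can wait for the collected page-table-update fences before
 * user queues touch the memory. Passing intr=true makes the wait
 * interruptible so a fatal signal can abort the ioctl cleanly.
 */
static int example_map_and_wait(struct kgd_dev *kgd, struct kgd_mem *mem,
				void *vm)
{
	int ret;

	ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
	if (ret)
		return ret;

	/* Waits on a clone of mem->sync, so mem->lock is not held
	 * across the wait.
	 */
	return amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
}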
1567
1568 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1569                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1570 {
1571         int ret;
1572         struct amdgpu_bo *bo = mem->bo;
1573
1574         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1575                 pr_err("userptr can't be mapped to kernel\n");
1576                 return -EINVAL;
1577         }
1578
1579         /* Remove the kgd_mem from the kfd_bo_list so this BO is not
1580          * re-validated when BOs are restored after an eviction.
1581          */
1582         mutex_lock(&mem->process_info->lock);
1583
1584         ret = amdgpu_bo_reserve(bo, true);
1585         if (ret) {
1586                 pr_err("Failed to reserve bo. ret %d\n", ret);
1587                 goto bo_reserve_failed;
1588         }
1589
1590         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1591         if (ret) {
1592                 pr_err("Failed to pin bo. ret %d\n", ret);
1593                 goto pin_failed;
1594         }
1595
1596         ret = amdgpu_bo_kmap(bo, kptr);
1597         if (ret) {
1598                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1599                 goto kmap_failed;
1600         }
1601
1602         amdgpu_amdkfd_remove_eviction_fence(
1603                 bo, mem->process_info->eviction_fence, NULL, NULL);
1604         list_del_init(&mem->validate_list.head);
1605
1606         if (size)
1607                 *size = amdgpu_bo_size(bo);
1608
1609         amdgpu_bo_unreserve(bo);
1610
1611         mutex_unlock(&mem->process_info->lock);
1612         return 0;
1613
1614 kmap_failed:
1615         amdgpu_bo_unpin(bo);
1616 pin_failed:
1617         amdgpu_bo_unreserve(bo);
1618 bo_reserve_failed:
1619         mutex_unlock(&mem->process_info->lock);
1620
1621         return ret;
1622 }
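
/*
 * Hypothetical reverse path (sketch only, not part of this file): a
 * kernel unmap helper would be expected to undo the steps above in
 * reverse order: drop the CPU mapping, unpin, re-attach the eviction
 * fence and put the BO back on the kfd_bo_list so it participates in
 * eviction/restore again. The real driver may order or name these
 * steps differently.
 */
static void example_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
{
	struct amdgpu_bo *bo = mem->bo;

	mutex_lock(&mem->process_info->lock);
	if (!amdgpu_bo_reserve(bo, true)) {
		amdgpu_bo_kunmap(bo);
		amdgpu_bo_unpin(bo);
		amdgpu_bo_fence(bo,
				&mem->process_info->eviction_fence->base, true);
		amdgpu_bo_unreserve(bo);
	}
	list_add_tail(&mem->validate_list.head,
		      &mem->process_info->kfd_bo_list);
	mutex_unlock(&mem->process_info->lock);
}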
1623
1624 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1625                                               struct kfd_vm_fault_info *mem)
1626 {
1627         struct amdgpu_device *adev;
1628
1629         adev = (struct amdgpu_device *)kgd;
1630         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1631                 *mem = *adev->gmc.vm_fault_info;
1632                 mb();
1633                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1634         }
1635         return 0;
1636 }
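
/*
 * Hypothetical caller sketch: an interrupt/event path could snapshot the
 * most recent VM fault like this. It assumes struct kfd_vm_fault_info
 * exposes page_addr and vmid as declared in kgd_kfd_interface.h; the copy
 * is only refreshed while vm_fault_info_updated has been set by the GMC
 * interrupt handler.
 */
static void example_log_last_vm_fault(struct kgd_dev *kgd)
{
	struct kfd_vm_fault_info info = {};

	amdgpu_amdkfd_gpuvm_get_vm_fault_info(kgd, &info);
	pr_debug("Last VM fault: VMID %u at page 0x%llx\n",
		 info.vmid, (unsigned long long)info.page_addr);
}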
1637
1638 /* Evict a userptr BO by stopping the queues if necessary
1639  *
1640  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1641  * cannot do any memory allocations, and cannot take any locks that
1642  * are held elsewhere while allocating memory. Therefore this is as
1643  * simple as possible, using atomic counters.
1644  *
1645  * It doesn't do anything to the BO itself. The real work happens in
1646  * restore, where we get updated page addresses. This function only
1647  * ensures that GPU access to the BO is stopped.
1648  */
1649 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1650                                 struct mm_struct *mm)
1651 {
1652         struct amdkfd_process_info *process_info = mem->process_info;
1653         int invalid, evicted_bos;
1654         int r = 0;
1655
1656         invalid = atomic_inc_return(&mem->invalid);
1657         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1658         if (evicted_bos == 1) {
1659                 /* First eviction, stop the queues */
1660                 r = kgd2kfd->quiesce_mm(mm);
1661                 if (r)
1662                         pr_err("Failed to quiesce KFD\n");
1663                 schedule_delayed_work(&process_info->restore_userptr_work,
1664                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1665         }
1666
1667         return r;
1668 }
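
/*
 * Minimal sketch of the eviction/restore handshake used above
 * (illustrative only, simplified names): every eviction bumps a
 * per-process counter, and the restore worker only restarts the queues
 * if it can atomically swing the counter back to zero from the value it
 * observed, i.e. no new eviction raced with the restore.
 */
static atomic_t example_evicted_bos = ATOMIC_INIT(0);

static bool example_try_finish_restore(int observed)
{
	/* Fails (and the worker reschedules) if another eviction
	 * happened after 'observed' was read.
	 */
	return atomic_cmpxchg(&example_evicted_bos, observed, 0) == observed;
}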
1669
1670 /* Update invalid userptr BOs
1671  *
1672  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1673  * userptr_inval_list and updates user pages for all BOs that have
1674  * been invalidated since their last update.
1675  */
1676 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1677                                      struct mm_struct *mm)
1678 {
1679         struct kgd_mem *mem, *tmp_mem;
1680         struct amdgpu_bo *bo;
1681         struct ttm_operation_ctx ctx = { false, false };
1682         int invalid, ret;
1683
1684         /* Move all invalidated BOs to the userptr_inval_list and
1685          * release their user pages by migration to the CPU domain
1686          */
1687         list_for_each_entry_safe(mem, tmp_mem,
1688                                  &process_info->userptr_valid_list,
1689                                  validate_list.head) {
1690                 if (!atomic_read(&mem->invalid))
1691                         continue; /* BO is still valid */
1692
1693                 bo = mem->bo;
1694
1695                 if (amdgpu_bo_reserve(bo, true))
1696                         return -EAGAIN;
1697                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1698                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1699                 amdgpu_bo_unreserve(bo);
1700                 if (ret) {
1701                         pr_err("%s: Failed to invalidate userptr BO\n",
1702                                __func__);
1703                         return -EAGAIN;
1704                 }
1705
1706                 list_move_tail(&mem->validate_list.head,
1707                                &process_info->userptr_inval_list);
1708         }
1709
1710         if (list_empty(&process_info->userptr_inval_list))
1711                 return 0; /* All evicted userptr BOs were freed */
1712
1713         /* Go through userptr_inval_list and update any invalid user_pages */
1714         list_for_each_entry(mem, &process_info->userptr_inval_list,
1715                             validate_list.head) {
1716                 invalid = atomic_read(&mem->invalid);
1717                 if (!invalid)
1718                         /* BO hasn't been invalidated since the last
1719                          * revalidation attempt. Keep its BO list.
1720                          */
1721                         continue;
1722
1723                 bo = mem->bo;
1724
1725                 if (!mem->user_pages) {
1726                         mem->user_pages =
1727                                 kvmalloc_array(bo->tbo.ttm->num_pages,
1728                                                  sizeof(struct page *),
1729                                                  GFP_KERNEL | __GFP_ZERO);
1730                         if (!mem->user_pages) {
1731                                 pr_err("%s: Failed to allocate pages array\n",
1732                                        __func__);
1733                                 return -ENOMEM;
1734                         }
1735                 } else if (mem->user_pages[0]) {
1736                         release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
1737                 }
1738
1739                 /* Get updated user pages */
1740                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1741                                                    mem->user_pages);
1742                 if (ret) {
1743                         mem->user_pages[0] = NULL;
1744                         pr_info("%s: Failed to get user pages: %d\n",
1745                                 __func__, ret);
1746                         /* Pretend it succeeded. It will fail later
1747                          * with a VM fault if the GPU tries to access
1748                          * it. Better than hanging indefinitely with
1749                          * stalled user mode queues.
1750                          */
1751                 }
1752
1753                 /* Mark the BO as valid unless it was invalidated
1754                  * again concurrently
1755                  */
1756                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1757                         return -EAGAIN;
1758         }
1759
1760         return 0;
1761 }
1762
1763 /* Validate invalid userptr BOs
1764  *
1765  * Validates BOs on the userptr_inval_list, and moves them back to the
1766  * userptr_valid_list. Also updates GPUVM page tables with new page
1767  * addresses and waits for the page table updates to complete.
1768  */
1769 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1770 {
1771         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1772         struct list_head resv_list, duplicates;
1773         struct ww_acquire_ctx ticket;
1774         struct amdgpu_sync sync;
1775
1776         struct amdgpu_vm *peer_vm;
1777         struct kgd_mem *mem, *tmp_mem;
1778         struct amdgpu_bo *bo;
1779         struct ttm_operation_ctx ctx = { false, false };
1780         int i, ret;
1781
1782         pd_bo_list_entries = kcalloc(process_info->n_vms,
1783                                      sizeof(struct amdgpu_bo_list_entry),
1784                                      GFP_KERNEL);
1785         if (!pd_bo_list_entries) {
1786                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1787                 return -ENOMEM;
1788         }
1789
1790         INIT_LIST_HEAD(&resv_list);
1791         INIT_LIST_HEAD(&duplicates);
1792
1793         /* Get all the page directory BOs that need to be reserved */
1794         i = 0;
1795         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1796                             vm_list_node)
1797                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1798                                     &pd_bo_list_entries[i++]);
1799         /* Add the userptr_inval_list entries to resv_list */
1800         list_for_each_entry(mem, &process_info->userptr_inval_list,
1801                             validate_list.head) {
1802                 list_add_tail(&mem->resv_list.head, &resv_list);
1803                 mem->resv_list.bo = mem->validate_list.bo;
1804                 mem->resv_list.shared = mem->validate_list.shared;
1805         }
1806
1807         /* Reserve all BOs and page tables for validation */
1808         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1809         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1810         if (ret)
1811                 goto out;
1812
1813         amdgpu_sync_create(&sync);
1814
1815         /* Avoid triggering eviction fences when unmapping invalid
1816          * userptr BOs (waits for all fences, doesn't use
1817          * FENCE_OWNER_VM)
1818          */
1819         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1820                             vm_list_node)
1821                 amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo,
1822                                                 process_info->eviction_fence,
1823                                                 NULL, NULL);
1824
1825         ret = process_validate_vms(process_info);
1826         if (ret)
1827                 goto unreserve_out;
1828
1829         /* Validate BOs and update GPUVM page tables */
1830         list_for_each_entry_safe(mem, tmp_mem,
1831                                  &process_info->userptr_inval_list,
1832                                  validate_list.head) {
1833                 struct kfd_bo_va_list *bo_va_entry;
1834
1835                 bo = mem->bo;
1836
1837                 /* Copy pages array and validate the BO if we got user pages */
1838                 if (mem->user_pages[0]) {
1839                         amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
1840                                                      mem->user_pages);
1841                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1842                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1843                         if (ret) {
1844                                 pr_err("%s: failed to validate BO\n", __func__);
1845                                 goto unreserve_out;
1846                         }
1847                 }
1848
1849                 /* Validate succeeded, now the BO owns the pages, free
1850                  * our copy of the pointer array. Put this BO back on
1851                  * the userptr_valid_list. If we need to revalidate
1852                  * it, we need to start from scratch.
1853                  */
1854                 kvfree(mem->user_pages);
1855                 mem->user_pages = NULL;
1856                 list_move_tail(&mem->validate_list.head,
1857                                &process_info->userptr_valid_list);
1858
1859                 /* Update mapping. If the BO was not validated
1860                  * (because we couldn't get user pages), this will
1861                  * clear the page table entries, which will result in
1862                  * VM faults if the GPU tries to access the invalid
1863                  * memory.
1864                  */
1865                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1866                         if (!bo_va_entry->is_mapped)
1867                                 continue;
1868
1869                         ret = update_gpuvm_pte((struct amdgpu_device *)
1870                                                bo_va_entry->kgd_dev,
1871                                                bo_va_entry, &sync);
1872                         if (ret) {
1873                                 pr_err("%s: update PTE failed\n", __func__);
1874                                 /* make sure this gets validated again */
1875                                 atomic_inc(&mem->invalid);
1876                                 goto unreserve_out;
1877                         }
1878                 }
1879         }
1880
1881         /* Update page directories */
1882         ret = process_update_pds(process_info, &sync);
1883
1884 unreserve_out:
1885         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1886                             vm_list_node)
1887                 amdgpu_bo_fence(peer_vm->root.base.bo,
1888                                 &process_info->eviction_fence->base, true);
1889         ttm_eu_backoff_reservation(&ticket, &resv_list);
1890         amdgpu_sync_wait(&sync, false);
1891         amdgpu_sync_free(&sync);
1892 out:
1893         kfree(pd_bo_list_entries);
1894
1895         return ret;
1896 }
1897
1898 /* Worker callback to restore evicted userptr BOs
1899  *
1900  * Tries to update and validate all userptr BOs. If successful and no
1901  * concurrent evictions happened, the queues are restarted. Otherwise,
1902  * reschedule for another attempt later.
1903  */
1904 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1905 {
1906         struct delayed_work *dwork = to_delayed_work(work);
1907         struct amdkfd_process_info *process_info =
1908                 container_of(dwork, struct amdkfd_process_info,
1909                              restore_userptr_work);
1910         struct task_struct *usertask;
1911         struct mm_struct *mm;
1912         int evicted_bos;
1913
1914         evicted_bos = atomic_read(&process_info->evicted_bos);
1915         if (!evicted_bos)
1916                 return;
1917
1918         /* Reference task and mm in case of concurrent process termination */
1919         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1920         if (!usertask)
1921                 return;
1922         mm = get_task_mm(usertask);
1923         if (!mm) {
1924                 put_task_struct(usertask);
1925                 return;
1926         }
1927
1928         mutex_lock(&process_info->lock);
1929
1930         if (update_invalid_user_pages(process_info, mm))
1931                 goto unlock_out;
1932         /* userptr_inval_list can be empty if all evicted userptr BOs
1933          * have been freed. In that case there is nothing to validate
1934          * and we can just restart the queues.
1935          */
1936         if (!list_empty(&process_info->userptr_inval_list)) {
1937                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1938                         goto unlock_out; /* Concurrent eviction, try again */
1939
1940                 if (validate_invalid_user_pages(process_info))
1941                         goto unlock_out;
1942         }
1943         /* Final check for concurrent eviction and atomic update. If
1944          * another eviction happens after successful update, it will
1945          * be a first eviction that calls quiesce_mm. The eviction
1946          * reference counting inside KFD will handle this case.
1947          */
1948         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1949             evicted_bos)
1950                 goto unlock_out;
1951         evicted_bos = 0;
1952         if (kgd2kfd->resume_mm(mm)) {
1953                 pr_err("%s: Failed to resume KFD\n", __func__);
1954                 /* No recovery from this failure. Probably the CP is
1955                  * hanging. No point trying again.
1956                  */
1957         }
1958 unlock_out:
1959         mutex_unlock(&process_info->lock);
1960         mmput(mm);
1961         put_task_struct(usertask);
1962
1963         /* If validation failed, reschedule another attempt */
1964         if (evicted_bos)
1965                 schedule_delayed_work(&process_info->restore_userptr_work,
1966                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1967 }
1968
1969 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1970  *   KFD process identified by process_info
1971  *
1972  * @process_info: amdkfd_process_info of the KFD process
1973  *
1974  * After memory eviction, the restore thread calls this function. The function
1975  * should be called while the process is still valid. BO restore involves:
1976  *
1977  * 1.  Release old eviction fence and create new one
1978  * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
1979  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1980  *     BOs that need to be reserved.
1981  * 4.  Reserve all the BOs
1982  * 5.  Validate PD and PT BOs.
1983  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add a new fence
1984  * 7.  Add fence to all PD and PT BOs.
1985  * 8.  Unreserve all BOs
1986  */
1987 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1988 {
1989         struct amdgpu_bo_list_entry *pd_bo_list;
1990         struct amdkfd_process_info *process_info = info;
1991         struct amdgpu_vm *peer_vm;
1992         struct kgd_mem *mem;
1993         struct bo_vm_reservation_context ctx;
1994         struct amdgpu_amdkfd_fence *new_fence;
1995         int ret = 0, i;
1996         struct list_head duplicate_save;
1997         struct amdgpu_sync sync_obj;
1998
1999         INIT_LIST_HEAD(&duplicate_save);
2000         INIT_LIST_HEAD(&ctx.list);
2001         INIT_LIST_HEAD(&ctx.duplicates);
2002
2003         pd_bo_list = kcalloc(process_info->n_vms,
2004                              sizeof(struct amdgpu_bo_list_entry),
2005                              GFP_KERNEL);
2006         if (!pd_bo_list)
2007                 return -ENOMEM;
2008
2009         i = 0;
2010         mutex_lock(&process_info->lock);
2011         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2012                         vm_list_node)
2013                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2014
2015         /* Reserve all BOs and page tables/directory. Add all BOs from
2016          * kfd_bo_list to ctx.list
2017          */
2018         list_for_each_entry(mem, &process_info->kfd_bo_list,
2019                             validate_list.head) {
2020
2021                 list_add_tail(&mem->resv_list.head, &ctx.list);
2022                 mem->resv_list.bo = mem->validate_list.bo;
2023                 mem->resv_list.shared = mem->validate_list.shared;
2024         }
2025
2026         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2027                                      false, &duplicate_save);
2028         if (ret) {
2029                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2030                 goto ttm_reserve_fail;
2031         }
2032
2033         amdgpu_sync_create(&sync_obj);
2034
2035         /* Validate PDs and PTs */
2036         ret = process_validate_vms(process_info);
2037         if (ret)
2038                 goto validate_map_fail;
2039
2040         /* Wait for PD/PTs validate to finish */
2041         /* FIXME: I think this isn't needed */
2042         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2043                             vm_list_node) {
2044                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2045
2046                 ttm_bo_wait(&bo->tbo, false, false);
2047         }
2048
2049         /* Validate BOs and map them to GPUVM (update VM page tables). */
2050         list_for_each_entry(mem, &process_info->kfd_bo_list,
2051                             validate_list.head) {
2052
2053                 struct amdgpu_bo *bo = mem->bo;
2054                 uint32_t domain = mem->domain;
2055                 struct kfd_bo_va_list *bo_va_entry;
2056
2057                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2058                 if (ret) {
2059                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2060                         goto validate_map_fail;
2061                 }
2062
2063                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2064                                     bo_list) {
2065                         ret = update_gpuvm_pte((struct amdgpu_device *)
2066                                               bo_va_entry->kgd_dev,
2067                                               bo_va_entry,
2068                                               &sync_obj);
2069                         if (ret) {
2070                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2071                                 goto validate_map_fail;
2072                         }
2073                 }
2074         }
2075
2076         /* Update page directories */
2077         ret = process_update_pds(process_info, &sync_obj);
2078         if (ret) {
2079                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2080                 goto validate_map_fail;
2081         }
2082
2083         amdgpu_sync_wait(&sync_obj, false);
2084
2085         /* Release the old eviction fence and create a new one, because a
2086          * fence only goes from unsignaled to signaled and cannot be reused.
2087          * Use the context and mm from the old fence.
2088          */
2089         new_fence = amdgpu_amdkfd_fence_create(
2090                                 process_info->eviction_fence->base.context,
2091                                 process_info->eviction_fence->mm);
2092         if (!new_fence) {
2093                 pr_err("Failed to create eviction fence\n");
2094                 ret = -ENOMEM;
2095                 goto validate_map_fail;
2096         }
2097         dma_fence_put(&process_info->eviction_fence->base);
2098         process_info->eviction_fence = new_fence;
2099         *ef = dma_fence_get(&new_fence->base);
2100
2101         /* Wait for validate to finish and attach new eviction fence */
2102         list_for_each_entry(mem, &process_info->kfd_bo_list,
2103                 validate_list.head)
2104                 ttm_bo_wait(&mem->bo->tbo, false, false);
2105         list_for_each_entry(mem, &process_info->kfd_bo_list,
2106                 validate_list.head)
2107                 amdgpu_bo_fence(mem->bo,
2108                         &process_info->eviction_fence->base, true);
2109
2110         /* Attach eviction fence to PD / PT BOs */
2111         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2112                             vm_list_node) {
2113                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2114
2115                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2116         }
2117
2118 validate_map_fail:
2119         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2120         amdgpu_sync_free(&sync_obj);
2121 ttm_reserve_fail:
2122         mutex_unlock(&process_info->lock);
2123         kfree(pd_bo_list);
2124         return ret;
2125 }
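
/*
 * Hypothetical caller sketch: a KFD restore worker is expected to pass
 * the opaque process_info cookie it received at process creation and to
 * keep the returned eviction fence reference in *ef. Any reference the
 * caller still holds on the previous fence should be dropped separately
 * to avoid leaking it; on failure the caller would typically reschedule
 * another restore attempt after a delay.
 */
static int example_restore_kfd_process(void *process_info,
				       struct dma_fence **ef)
{
	int ret;

	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, ef);
	if (ret)
		pr_err("Restore of KFD process BOs failed, ret %d\n", ret);
	return ret;
}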