/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
        uint64_t max_system_mem_limit;
        uint64_t max_ttm_mem_limit;
        int64_t system_mem_used;
        int64_t ttm_mem_used;
        spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
        uint32_t        domain;
        bool            wait;
};

static const char * const domain_bit_to_string[] = {
                "CPU",
                "GTT",
                "VRAM",
                "GDS",
                "GWS",
                "OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
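
/* Illustrative example: for AMDGPU_GEM_DOMAIN_VRAM (bit value 0x4),
 * ffs(0x4) - 1 == 2, so domain_string() yields "VRAM". This assumes a
 * single domain bit is set; with several bits set, only the lowest one
 * is named.
 */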

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
        return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
                struct kgd_mem *mem)
{
        struct kfd_bo_va_list *entry;

        list_for_each_entry(entry, &mem->bo_va_list, bo_list)
                if (entry->bo_va->base.vm == avm)
                        return false;

        return true;
}

/* Set memory usage limits. Currently, the limits are:
 *  System (TTM + userptr) memory - 15/16th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
        struct sysinfo si;
        uint64_t mem;

        si_meminfo(&si);
        mem = si.totalram - si.totalhigh;
        mem *= si.mem_unit;

        spin_lock_init(&kfd_mem_limit.mem_limit_lock);
        kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
        kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
        pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
                (kfd_mem_limit.max_system_mem_limit >> 20),
                (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
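
/* Worked example (illustrative): with 64 GiB of system RAM,
 * mem - (mem >> 4) yields a 60 GiB system memory limit (15/16 of RAM)
 * and (mem >> 1) - (mem >> 3) yields a 24 GiB TTM limit (3/8).
 */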

/* Estimate page table size needed to represent a given memory size
 *
 * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
 * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
 * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
 * for 2MB pages for TLB efficiency. However, small allocations and
 * fragmented system memory still need some 4KB pages. We choose a
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
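
/* Worked example (illustrative): for 64 GiB of managed memory this
 * reserves 64 GiB >> 14 = 4 MiB for page tables, a compromise between
 * the all-4KB worst case (64 GiB >> 9 = 128 MiB) and the all-2MB best
 * case (64 GiB >> 18 = 256 KiB).
 */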

static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
{
        uint64_t reserved_for_pt =
                ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
        size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
        int ret = 0;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        vram_needed = 0;
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                /* TTM GTT memory */
                system_mem_needed = acc_size + size;
                ttm_mem_needed = acc_size + size;
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
                /* Userptr */
                system_mem_needed = acc_size + size;
                ttm_mem_needed = acc_size;
        } else {
                /* VRAM and SG */
                system_mem_needed = acc_size;
                ttm_mem_needed = acc_size;
                if (domain == AMDGPU_GEM_DOMAIN_VRAM)
                        vram_needed = size;
        }

        spin_lock(&kfd_mem_limit.mem_limit_lock);

        if (kfd_mem_limit.system_mem_used + system_mem_needed >
            kfd_mem_limit.max_system_mem_limit)
                pr_debug("Set no_system_mem_limit=1 if using shared memory\n");

        if ((kfd_mem_limit.system_mem_used + system_mem_needed >
             kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
            (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
             kfd_mem_limit.max_ttm_mem_limit) ||
            (adev->kfd.vram_used + vram_needed >
             adev->gmc.real_vram_size - reserved_for_pt)) {
                ret = -ENOMEM;
        } else {
                kfd_mem_limit.system_mem_used += system_mem_needed;
                kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
                adev->kfd.vram_used += vram_needed;
        }

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
        return ret;
}
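
/* Summary of the accounting cases above (sketch; "acc" is the TTM
 * bookkeeping overhead returned by ttm_bo_dma_acc_size()):
 *
 *   Domain        system_mem    ttm_mem       vram
 *   GTT           acc + size    acc + size    0
 *   CPU, !sg      acc + size    acc           0    (userptr)
 *   VRAM          acc           acc           size
 *   SG / other    acc           acc           0
 */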

static void unreserve_mem_limit(struct amdgpu_device *adev,
                uint64_t size, u32 domain, bool sg)
{
        size_t acc_size;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        spin_lock(&kfd_mem_limit.mem_limit_lock);
        if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                kfd_mem_limit.system_mem_used -= (acc_size + size);
                kfd_mem_limit.ttm_mem_used -= (acc_size + size);
        } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
                kfd_mem_limit.system_mem_used -= (acc_size + size);
                kfd_mem_limit.ttm_mem_used -= acc_size;
        } else {
                kfd_mem_limit.system_mem_used -= acc_size;
                kfd_mem_limit.ttm_mem_used -= acc_size;
                if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                        adev->kfd.vram_used -= size;
                        WARN_ONCE(adev->kfd.vram_used < 0,
                                  "kfd VRAM memory accounting unbalanced");
                }
        }
        WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
                  "kfd system memory accounting unbalanced");
        WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
                  "kfd TTM memory accounting unbalanced");

        spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        u32 domain = bo->preferred_domains;
        bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

        if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
                domain = AMDGPU_GEM_DOMAIN_CPU;
                sg = false;
        }

        unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. the bo->tbo.base.resv lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef)
{
        struct dma_resv *resv = bo->tbo.base.resv;
        struct dma_resv_list *old, *new;
        unsigned int i, j, k;

        if (!ef)
                return -EINVAL;

        old = dma_resv_get_list(resv);
        if (!old)
                return 0;

        new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
                      GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        /* Go through all the shared fences in the reservation object and sort
         * the interesting ones to the end of the list.
         */
        for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
                struct dma_fence *f;

                f = rcu_dereference_protected(old->shared[i],
                                              dma_resv_held(resv));

                if (f->context == ef->base.context)
                        RCU_INIT_POINTER(new->shared[--j], f);
                else
                        RCU_INIT_POINTER(new->shared[k++], f);
        }
        new->shared_max = old->shared_max;
        new->shared_count = k;

        /* Install the new fence list, seqcount provides the barriers */
        preempt_disable();
        write_seqcount_begin(&resv->seq);
        RCU_INIT_POINTER(resv->fence, new);
        write_seqcount_end(&resv->seq);
        preempt_enable();

        /* Drop the references to the removed fences */
        for (i = j, k = 0; i < old->shared_count; ++i) {
                struct dma_fence *f;

                f = rcu_dereference_protected(new->shared[i],
                                              dma_resv_held(resv));
                dma_fence_put(f);
        }
        kfree_rcu(old, rcu);

        return 0;
}
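
/* Illustrative walk-through with hypothetical fences: given a shared
 * list [A, B, E1, C, E2] where E1/E2 share ef's fence context, the
 * partitioning loop above produces new->shared = [A, B, C, E2, E1]
 * with shared_count = 3, so readers only see [A, B, C]; the trailing
 * eviction fences are then released by the final dma_fence_put() loop.
 */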

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
        struct amdgpu_bo *root = bo;
        struct amdgpu_vm_bo_base *vm_bo;
        struct amdgpu_vm *vm;
        struct amdkfd_process_info *info;
        struct amdgpu_amdkfd_fence *ef;
        int ret;

        /* We can always get vm_bo from the root PD BO. */
        while (root->parent)
                root = root->parent;

        vm_bo = root->vm_bo;
        if (!vm_bo)
                return 0;

        vm = vm_bo->vm;
        if (!vm)
                return 0;

        info = vm->process_info;
        if (!info || !info->eviction_fence)
                return 0;

        ef = container_of(dma_fence_get(&info->eviction_fence->base),
                        struct amdgpu_amdkfd_fence, base);

        BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
        ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
        dma_resv_unlock(bo->tbo.base.resv);

        dma_fence_put(&ef->base);
        return ret;
}

static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
                                     bool wait)
{
        struct ttm_operation_ctx ctx = { false, false };
        int ret;

        if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
                 "Called with userptr BO"))
                return -EINVAL;

        amdgpu_bo_placement_from_domain(bo, domain);

        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                goto validate_fail;
        if (wait)
                amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
        return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_vm_parser *p = param;

        return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        struct amdgpu_vm_parser param;
        int ret;

        param.domain = AMDGPU_GEM_DOMAIN_VRAM;
        param.wait = false;

        ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
                                        &param);
        if (ret) {
                pr_err("failed to validate PT BOs\n");
                return ret;
        }

        ret = amdgpu_amdkfd_validate(&param, pd);
        if (ret) {
                pr_err("failed to validate PD\n");
                return ret;
        }

        vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

        if (vm->use_cpu_for_update) {
                ret = amdgpu_bo_kmap(pd, NULL);
                if (ret) {
                        pr_err("failed to kmap PD, ret=%d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
        struct amdgpu_bo *pd = vm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
        int ret;

        ret = amdgpu_vm_update_pdes(adev, vm, false);
        if (ret)
                return ret;

        return amdgpu_sync_fence(sync, vm->last_update);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
        struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
        bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
        uint32_t mapping_flags;

        mapping_flags = AMDGPU_VM_PAGE_READABLE;
        if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
                mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
        if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
                mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

        switch (adev->asic_type) {
        case CHIP_ARCTURUS:
                if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
                        if (bo_adev == adev)
                                mapping_flags |= coherent ?
                                        AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
                        else
                                mapping_flags |= AMDGPU_VM_MTYPE_UC;
                } else {
                        mapping_flags |= coherent ?
                                AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
                }
                break;
        default:
                mapping_flags |= coherent ?
                        AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
        }

        return amdgpu_gem_va_map_flags(adev, mapping_flags);
}
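
/* Example flag selection (illustrative): a writable, non-coherent
 * VRAM allocation mapped on the Arcturus GPU that owns it gets
 * READABLE | WRITEABLE | MTYPE_RW; the same BO mapped from a peer GPU
 * gets MTYPE_UC instead, i.e. remote VRAM is mapped uncached.
 */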

/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
                struct amdgpu_vm *vm, bool is_aql,
                struct kfd_bo_va_list **p_bo_va_entry)
{
        int ret;
        struct kfd_bo_va_list *bo_va_entry;
        struct amdgpu_bo *bo = mem->bo;
        uint64_t va = mem->va;
        struct list_head *list_bo_va = &mem->bo_va_list;
        unsigned long bo_size = bo->tbo.mem.size;

        if (!va) {
                pr_err("Invalid VA when adding BO to VM\n");
                return -EINVAL;
        }

        if (is_aql)
                va += bo_size;

        bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
        if (!bo_va_entry)
                return -ENOMEM;

        pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
                        va + bo_size, vm);

        /* Add BO to VM internal data structures */
        bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
        if (!bo_va_entry->bo_va) {
                ret = -EINVAL;
                pr_err("Failed to add BO to VM. ret == %d\n",
                                ret);
                goto err_vmadd;
        }

        bo_va_entry->va = va;
        bo_va_entry->pte_flags = get_pte_flags(adev, mem);
        bo_va_entry->kgd_dev = (void *)adev;
        list_add(&bo_va_entry->bo_list, list_bo_va);

        if (p_bo_va_entry)
                *p_bo_va_entry = bo_va_entry;

        /* Allocate and validate page tables if needed */
        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto err_alloc_pts;
        }

        return 0;

err_alloc_pts:
        amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
        list_del(&bo_va_entry->bo_list);
err_vmadd:
        kfree(bo_va_entry);
        return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, unsigned long size)
{
        pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
                        entry->va,
                        entry->va + size, entry);
        amdgpu_vm_bo_rmv(adev, entry->bo_va);
        list_del(&entry->bo_list);
        kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
                                struct amdkfd_process_info *process_info,
                                bool userptr)
{
        struct ttm_validate_buffer *entry = &mem->validate_list;
        struct amdgpu_bo *bo = mem->bo;

        INIT_LIST_HEAD(&entry->head);
        entry->num_shared = 1;
        entry->bo = &bo->tbo;
        mutex_lock(&process_info->lock);
        if (userptr)
                list_add_tail(&entry->head, &process_info->userptr_valid_list);
        else
                list_add_tail(&entry->head, &process_info->kfd_bo_list);
        mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
                struct amdkfd_process_info *process_info)
{
        struct ttm_validate_buffer *bo_list_entry;

        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        struct amdgpu_bo *bo = mem->bo;
        struct ttm_operation_ctx ctx = { true, false };
        int ret = 0;

        mutex_lock(&process_info->lock);

        ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
        if (ret) {
                pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
                goto out;
        }

        ret = amdgpu_mn_register(bo, user_addr);
        if (ret) {
                pr_err("%s: Failed to register MMU notifier: %d\n",
                       __func__, ret);
                goto out;
        }

        ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
        if (ret) {
                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
                goto unregister_out;
        }

        ret = amdgpu_bo_reserve(bo, true);
        if (ret) {
                pr_err("%s: Failed to reserve BO\n", __func__);
                goto release_out;
        }
        amdgpu_bo_placement_from_domain(bo, mem->domain);
        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (ret)
                pr_err("%s: failed to validate BO\n", __func__);
        amdgpu_bo_unreserve(bo);

release_out:
        amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
        if (ret)
                amdgpu_mn_unregister(bo);
out:
        mutex_unlock(&process_info->lock);
        return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
        struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
        unsigned int n_vms;                 /* Number of VMs reserved       */
        struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
        struct ww_acquire_ctx ticket;       /* Reservation ticket           */
        struct list_head list, duplicates;  /* BO lists                     */
        struct amdgpu_sync *sync;           /* Pointer to sync object       */
        bool reserved;                      /* Whether BOs are reserved     */
};

enum bo_vm_match {
        BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
        BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
        BO_VM_ALL,              /* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
                              struct amdgpu_vm *vm,
                              struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        int ret;

        WARN_ON(!vm);

        ctx->reserved = false;
        ctx->n_vms = 1;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
        if (!ctx->vm_pd)
                return -ENOMEM;

        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (ret) {
                pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
                return ret;
        }

        ctx->reserved = true;
        return 0;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are reserved. Otherwise, only the single given VM is.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
                                struct amdgpu_vm *vm, enum bo_vm_match map_type,
                                struct bo_vm_reservation_context *ctx)
{
        struct amdgpu_bo *bo = mem->bo;
        struct kfd_bo_va_list *entry;
        unsigned int i;
        int ret;

        ctx->reserved = false;
        ctx->n_vms = 0;
        ctx->vm_pd = NULL;
        ctx->sync = &mem->sync;

        INIT_LIST_HEAD(&ctx->list);
        INIT_LIST_HEAD(&ctx->duplicates);

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                ctx->n_vms++;
        }

        if (ctx->n_vms != 0) {
                ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
                                     GFP_KERNEL);
                if (!ctx->vm_pd)
                        return -ENOMEM;
        }

        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);

        i = 0;
        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if ((vm && vm != entry->bo_va->base.vm) ||
                        (entry->is_mapped != map_type
                        && map_type != BO_VM_ALL))
                        continue;

                amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
                                &ctx->vm_pd[i]);
                i++;
        }

        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
                                     false, &ctx->duplicates);
        if (ret) {
                pr_err("Failed to reserve buffers in ttm.\n");
                kfree(ctx->vm_pd);
                ctx->vm_pd = NULL;
                return ret;
        }

        ctx->reserved = true;
        return 0;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
                                 bool wait, bool intr)
{
        int ret = 0;

        if (wait)
                ret = amdgpu_sync_wait(ctx->sync, intr);

        if (ctx->reserved)
                ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
        kfree(ctx->vm_pd);

        ctx->sync = NULL;

        ctx->reserved = false;
        ctx->vm_pd = NULL;

        return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
                                struct kfd_bo_va_list *entry,
                                struct amdgpu_sync *sync)
{
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_vm *vm = bo_va->base.vm;

        amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

        amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

        amdgpu_sync_fence(sync, bo_va->last_pt_update);

        return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry,
                struct amdgpu_sync *sync)
{
        int ret;
        struct amdgpu_bo_va *bo_va = entry->bo_va;

        /* Update the page tables */
        ret = amdgpu_vm_bo_update(adev, bo_va, false);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
        }

        return amdgpu_sync_fence(sync, bo_va->last_pt_update);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
                struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
                bool no_update_pte)
{
        int ret;

        /* Set virtual address for the allocation */
        ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
                               amdgpu_bo_size(entry->bo_va->base.bo),
                               entry->pte_flags);
        if (ret) {
                pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
                                entry->va, ret);
                return ret;
        }

        if (no_update_pte)
                return 0;

        ret = update_gpuvm_pte(adev, entry, sync);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
        }

        return 0;

update_gpuvm_pte_failed:
        unmap_bo_from_gpuvm(adev, entry, sync);
        return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
        struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

        if (!sg)
                return NULL;
        if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
                kfree(sg);
                return NULL;
        }
        sg->sgl->dma_address = addr;
        sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        sg->sgl->dma_length = size;
#endif
        return sg;
}
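
/* Note on the sg table above: it has a single entry whose dma_address
 * is set directly to the doorbell/MMIO bus address; there is no page
 * backing and no dma_map call involved. Callers therefore free it with
 * sg_free_table() + kfree() (as done in
 * amdgpu_amdkfd_gpuvm_free_memory_of_gpu()) rather than through the
 * DMA API.
 */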

static int process_validate_vms(struct amdkfd_process_info *process_info)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_validate_pt_pd_bos(peer_vm);
                if (ret)
                        return ret;
        }

        return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
                                 struct amdgpu_sync *sync)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                struct amdgpu_bo *pd = peer_vm->root.base.bo;

                ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
                                       AMDGPU_SYNC_NE_OWNER,
                                       AMDGPU_FENCE_OWNER_KFD);
                if (ret)
                        return ret;
        }

        return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
                              struct amdgpu_sync *sync)
{
        struct amdgpu_vm *peer_vm;
        int ret;

        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                            vm_list_node) {
                ret = vm_update_pds(peer_vm, sync);
                if (ret)
                        return ret;
        }

        return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
                       struct dma_fence **ef)
{
        struct amdkfd_process_info *info = NULL;
        int ret;

        if (!*process_info) {
                info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                mutex_init(&info->lock);
                INIT_LIST_HEAD(&info->vm_list_head);
                INIT_LIST_HEAD(&info->kfd_bo_list);
                INIT_LIST_HEAD(&info->userptr_valid_list);
                INIT_LIST_HEAD(&info->userptr_inval_list);

                info->eviction_fence =
                        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
                                                   current->mm);
                if (!info->eviction_fence) {
                        pr_err("Failed to create eviction fence\n");
                        ret = -ENOMEM;
                        goto create_evict_fence_fail;
                }

                info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
                atomic_set(&info->evicted_bos, 0);
                INIT_DELAYED_WORK(&info->restore_userptr_work,
                                  amdgpu_amdkfd_restore_userptr_worker);

                *process_info = info;
                *ef = dma_fence_get(&info->eviction_fence->base);
        }

        vm->process_info = *process_info;

        /* Validate page directory and attach eviction fence */
        ret = amdgpu_bo_reserve(vm->root.base.bo, true);
        if (ret)
                goto reserve_pd_fail;
        ret = vm_validate_pt_pd_bos(vm);
        if (ret) {
                pr_err("validate_pt_pd_bos() failed\n");
                goto validate_pd_fail;
        }
        ret = amdgpu_bo_sync_wait(vm->root.base.bo,
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
        ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.base.bo,
                        &vm->process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(vm->root.base.bo);

        /* Update process info */
        mutex_lock(&vm->process_info->lock);
        list_add_tail(&vm->vm_list_node,
                        &(vm->process_info->vm_list_head));
        vm->process_info->n_vms++;
        mutex_unlock(&vm->process_info->lock);

        return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
        amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
        vm->process_info = NULL;
        if (info) {
                /* Two fence references: one in info and one in *ef */
                dma_fence_put(&info->eviction_fence->base);
                dma_fence_put(*ef);
                *ef = NULL;
                *process_info = NULL;
                put_pid(info->pid);
create_evict_fence_fail:
                mutex_destroy(&info->lock);
                kfree(info);
        }
        return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
                                          void **vm, void **process_info,
                                          struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *new_vm;
        int ret;

        new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
        if (!new_vm)
                return -ENOMEM;

        /* Initialize AMDGPU part of the VM */
        ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
        if (ret) {
                pr_err("Failed init vm ret %d\n", ret);
                goto amdgpu_vm_init_fail;
        }

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(new_vm, process_info, ef);
        if (ret)
                goto init_kfd_vm_fail;

        *vm = (void *) new_vm;

        return 0;

init_kfd_vm_fail:
        amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
        kfree(new_vm);
        return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
                                           struct file *filp, unsigned int pasid,
                                           void **vm, void **process_info,
                                           struct dma_fence **ef)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct drm_file *drm_priv = filp->private_data;
        struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
        struct amdgpu_vm *avm = &drv_priv->vm;
        int ret;

        /* Already a compute VM? */
        if (avm->process_info)
                return -EINVAL;

        /* Convert VM into a compute VM */
        ret = amdgpu_vm_make_compute(adev, avm, pasid);
        if (ret)
                return ret;

        /* Initialize KFD part of the VM and process info */
        ret = init_kfd_vm(avm, process_info, ef);
        if (ret)
                return ret;

        *vm = (void *)avm;

        return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct amdkfd_process_info *process_info = vm->process_info;
        struct amdgpu_bo *pd = vm->root.base.bo;

        if (!process_info)
                return;

        /* Release eviction fence from PD */
        amdgpu_bo_reserve(pd, false);
        amdgpu_bo_fence(pd, NULL, false);
        amdgpu_bo_unreserve(pd);

        /* Update process info */
        mutex_lock(&process_info->lock);
        process_info->n_vms--;
        list_del(&vm->vm_list_node);
        mutex_unlock(&process_info->lock);

        vm->process_info = NULL;

        /* Release per-process resources when last compute VM is destroyed */
        if (!process_info->n_vms) {
                WARN_ON(!list_empty(&process_info->kfd_bo_list));
                WARN_ON(!list_empty(&process_info->userptr_valid_list));
                WARN_ON(!list_empty(&process_info->userptr_inval_list));

                dma_fence_put(&process_info->eviction_fence->base);
                cancel_delayed_work_sync(&process_info->restore_userptr_work);
                put_pid(process_info->pid);
                mutex_destroy(&process_info->lock);
                kfree(process_info);
        }
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))
                return;

        pr_debug("Destroying process vm %p\n", vm);

        /* Release the VM context */
        amdgpu_vm_fini(adev, avm);
        kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

        if (WARN_ON(!kgd || !vm))
                return;

        pr_debug("Releasing process vm %p\n", vm);

        /* The original PASID of the amdgpu VM was already released
         * when the VM was converted to a compute VM. The current
         * PASID is managed by KFD and will be released on KFD process
         * destruction. Set the amdgpu PASID to 0 to avoid a duplicate
         * release.
         */
        amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        struct amdgpu_bo *pd = avm->root.base.bo;
        struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

        if (adev->asic_type < CHIP_VEGA10)
                return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
        return avm->pd_phys_addr;
}
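
/* Illustrative: pre-Vega10 ASICs program the page directory address
 * in GPU page units, hence the shift; e.g. a PD at physical address
 * 0x200000 is reported as 0x200, assuming 4 KiB GPU pages. Newer
 * ASICs consume the full byte address as-is.
 */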

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                struct kgd_dev *kgd, uint64_t va, uint64_t size,
                void *vm, struct kgd_mem **mem,
                uint64_t *offset, uint32_t flags)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        enum ttm_bo_type bo_type = ttm_bo_type_device;
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        int ret;

        /*
         * Check on which domain to allocate BO
         */
        if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
                alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
                alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
                        AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
                        AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
                domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_flags = 0;
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
                alloc_flags = 0;
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = untagged_addr(*offset);
        } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
                        KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
                bo_type = ttm_bo_type_sg;
                alloc_flags = 0;
                if (size > UINT_MAX)
                        return -EINVAL;
                sg = create_doorbell_sg(*offset, size);
                if (!sg)
                        return -ENOMEM;
        } else {
                return -EINVAL;
        }

        *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
        if (!*mem) {
                ret = -ENOMEM;
                goto err;
        }
        INIT_LIST_HEAD(&(*mem)->bo_va_list);
        mutex_init(&(*mem)->lock);
        (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

        /* Workaround for AQL queue wraparound bug. Map the same
         * memory twice. That means we only actually allocate half
         * the memory.
         */
        if ((*mem)->aql_queue)
                size = size >> 1;
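
        /* Illustrative: a 2 MiB AQL queue request thus allocates a
         * 1 MiB BO that add_bo_to_vm() later maps at both va and
         * va + 1 MiB (the is_aql case), so hardware wrapping past the
         * end of the queue lands on the same pages.
         */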

        (*mem)->alloc_flags = flags;

        amdgpu_sync_create(&(*mem)->sync);

        ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
        if (ret) {
                pr_debug("Insufficient system memory\n");
                goto err_reserve_limit;
        }

        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));

        memset(&bp, 0, sizeof(bp));
        bp.size = size;
        bp.byte_align = 1;
        bp.domain = alloc_domain;
        bp.flags = alloc_flags;
        bp.type = bo_type;
        bp.resv = NULL;
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
                                domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
        if (bo_type == ttm_bo_type_sg) {
                bo->tbo.sg = sg;
                bo->tbo.ttm->sg = sg;
        }
        bo->kfd_bo = *mem;
        (*mem)->bo = bo;
        if (user_addr)
                bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

        (*mem)->va = va;
        (*mem)->domain = domain;
        (*mem)->mapped_to_gpu_memory = 0;
        (*mem)->process_info = avm->process_info;
        add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

        if (user_addr) {
                ret = init_user_pages(*mem, user_addr);
                if (ret)
                        goto allocate_init_user_pages_failed;
        }

        if (offset)
                *offset = amdgpu_bo_mmap_offset(bo);

        return 0;

allocate_init_user_pages_failed:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        amdgpu_bo_unref(&bo);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
err_bo_create:
        unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
        mutex_destroy(&(*mem)->lock);
        kfree(*mem);
err:
        if (sg) {
                sg_free_table(sg);
                kfree(sg);
        }
        return ret;
}
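
/* Typical caller sequence (sketch with hypothetical arguments; flags
 * and error handling elided):
 *
 *   struct kgd_mem *mem;
 *   uint64_t offset = 0;
 *
 *   amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
 *                  &offset, KFD_IOC_ALLOC_MEM_FLAGS_VRAM);
 *   amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *   ...
 *   amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *   amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, NULL);
 */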

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
        struct amdkfd_process_info *process_info = mem->process_info;
        unsigned long bo_size = mem->bo->tbo.mem.size;
        struct kfd_bo_va_list *entry, *tmp;
        struct bo_vm_reservation_context ctx;
        struct ttm_validate_buffer *bo_list_entry;
        unsigned int mapped_to_gpu_memory;
        int ret;
        bool is_imported = false;

        mutex_lock(&mem->lock);
        mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
        is_imported = mem->is_imported;
        mutex_unlock(&mem->lock);
        /* lock is not needed after this, since mem is unused and will
         * be freed anyway
         */

        if (mapped_to_gpu_memory > 0) {
                pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
                                mem->va, bo_size);
                return -EBUSY;
        }

        /* Make sure restore workers don't access the BO any more */
        bo_list_entry = &mem->validate_list;
        mutex_lock(&process_info->lock);
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);

        /* No more MMU notifiers */
        amdgpu_mn_unregister(mem->bo);

        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
                return ret;

        /* The eviction fence should be removed by the last unmap.
         * TODO: Log an error condition if the bo still has the eviction fence
         * attached
         */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                        process_info->eviction_fence);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
                mem->va + bo_size * (1 + mem->aql_queue));

        /* Remove from VM internal data structures */
        list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
                remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
                                entry, bo_size);

        ret = unreserve_bo_and_vms(&ctx, false, false);

        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);

        /* If the SG is not NULL, it's one we created for a doorbell or MMIO
         * remap BO. We need to free it.
         */
        if (mem->bo->tbo.sg) {
                sg_free_table(mem->bo->tbo.sg);
                kfree(mem->bo->tbo.sg);
        }

        /* Update the size of the BO being freed if it was allocated from
         * VRAM and is not imported.
         */
        if (size) {
                if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
                    (!is_imported))
                        *size = bo_size;
                else
                        *size = 0;
        }

        /* Free the BO */
        drm_gem_object_put(&mem->bo->tbo.base);
        mutex_destroy(&mem->lock);
        kfree(mem);

        return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
        int ret;
        struct amdgpu_bo *bo;
        uint32_t domain;
        struct kfd_bo_va_list *entry;
        struct bo_vm_reservation_context ctx;
        struct kfd_bo_va_list *bo_va_entry = NULL;
        struct kfd_bo_va_list *bo_va_entry_aql = NULL;
        unsigned long bo_size;
        bool is_invalid_userptr = false;

        bo = mem->bo;
        if (!bo) {
                pr_err("Invalid BO when mapping memory to GPU\n");
                return -EINVAL;
        }

        /* Make sure restore is not running concurrently. Since we
         * don't map invalid userptr BOs, we rely on the next restore
         * worker to do the mapping
         */
        mutex_lock(&mem->process_info->lock);

        /* Take the mmap lock. If we find an invalid userptr BO, we
         * can be sure that the MMU notifier is no longer running
         * concurrently and the queues are actually stopped
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                mmap_write_lock(current->mm);
                is_invalid_userptr = atomic_read(&mem->invalid);
                mmap_write_unlock(current->mm);
        }

        mutex_lock(&mem->lock);

        domain = mem->domain;
        bo_size = bo->tbo.mem.size;

        pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
                        mem->va,
                        mem->va + bo_size * (1 + mem->aql_queue),
                        vm, domain_string(domain));

        ret = reserve_bo_and_vm(mem, vm, &ctx);
        if (unlikely(ret))
                goto out;

        /* Userptr can be marked as "not invalid", but not actually be
         * validated yet (still in the system domain). In that case
         * the queues are still stopped and we can leave mapping for
         * the next restore worker
         */
        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
            bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
                is_invalid_userptr = true;

        if (check_if_add_bo_to_vm(avm, mem)) {
                ret = add_bo_to_vm(adev, mem, avm, false,
                                &bo_va_entry);
                if (ret)
                        goto add_bo_to_vm_failed;
                if (mem->aql_queue) {
                        ret = add_bo_to_vm(adev, mem, avm,
                                        true, &bo_va_entry_aql);
                        if (ret)
                                goto add_bo_to_vm_failed_aql;
                }
        } else {
                ret = vm_validate_pt_pd_bos(avm);
                if (unlikely(ret))
                        goto add_bo_to_vm_failed;
        }

        if (mem->mapped_to_gpu_memory == 0 &&
            !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                /* Validate BO only once. The eviction fence gets added to BO
                 * the first time it is mapped. Validate will wait for all
                 * background evictions to complete.
                 */
                ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
                if (ret) {
                        pr_debug("Validate failed\n");
                        goto map_bo_to_gpuvm_failed;
                }
        }

        list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
                if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
                        pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
                                        entry->va, entry->va + bo_size,
                                        entry);

                        ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
                                              is_invalid_userptr);
                        if (ret) {
                                pr_err("Failed to map bo to gpuvm\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        ret = vm_update_pds(vm, ctx.sync);
                        if (ret) {
                                pr_err("Failed to update page directories\n");
                                goto map_bo_to_gpuvm_failed;
                        }

                        entry->is_mapped = true;
                        mem->mapped_to_gpu_memory++;
                        pr_debug("\t INC mapping count %d\n",
                                        mem->mapped_to_gpu_memory);
                }
        }

        if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
                amdgpu_bo_fence(bo,
                                &avm->process_info->eviction_fence->base,
                                true);
        ret = unreserve_bo_and_vms(&ctx, false, false);

        goto out;

map_bo_to_gpuvm_failed:
        if (bo_va_entry_aql)
                remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
        if (bo_va_entry)
                remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
        unreserve_bo_and_vms(&ctx, false, false);
out:
        mutex_unlock(&mem->process_info->lock);
        mutex_unlock(&mem->lock);
        return ret;
}
1505
1506 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1507                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1508 {
1509         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1510         struct amdkfd_process_info *process_info =
1511                 ((struct amdgpu_vm *)vm)->process_info;
1512         unsigned long bo_size = mem->bo->tbo.mem.size;
1513         struct kfd_bo_va_list *entry;
1514         struct bo_vm_reservation_context ctx;
1515         int ret;
1516
1517         mutex_lock(&mem->lock);
1518
1519         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1520         if (unlikely(ret))
1521                 goto out;
1522         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1523         if (ctx.n_vms == 0) {
1524                 ret = -EINVAL;
1525                 goto unreserve_out;
1526         }
1527
1528         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1529         if (unlikely(ret))
1530                 goto unreserve_out;
1531
1532         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1533                 mem->va,
1534                 mem->va + bo_size * (1 + mem->aql_queue),
1535                 vm);
1536
1537         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1538                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1539                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1540                                         entry->va,
1541                                         entry->va + bo_size,
1542                                         entry);
1543
1544                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1545                         if (ret == 0) {
1546                                 entry->is_mapped = false;
1547                         } else {
1548                                 pr_err("failed to unmap VA 0x%llx\n",
1549                                                 mem->va);
1550                                 goto unreserve_out;
1551                         }
1552
1553                         mem->mapped_to_gpu_memory--;
1554                         pr_debug("\t DEC mapping count %d\n",
1555                                         mem->mapped_to_gpu_memory);
1556                 }
1557         }
1558
1559         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1560          * required.
1561          */
1562         if (mem->mapped_to_gpu_memory == 0 &&
1563             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1564                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1565                                                 process_info->eviction_fence);
1566
1567 unreserve_out:
1568         unreserve_bo_and_vms(&ctx, false, false);
1569 out:
1570         mutex_unlock(&mem->lock);
1571         return ret;
1572 }
1573
1574 int amdgpu_amdkfd_gpuvm_sync_memory(
1575                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1576 {
1577         struct amdgpu_sync sync;
1578         int ret;
1579
1580         amdgpu_sync_create(&sync);
1581
1582         mutex_lock(&mem->lock);
1583         amdgpu_sync_clone(&mem->sync, &sync);
1584         mutex_unlock(&mem->lock);
1585
1586         ret = amdgpu_sync_wait(&sync, intr);
1587         amdgpu_sync_free(&sync);
1588         return ret;
1589 }
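
/* Editor's note: an illustrative caller sketch, not part of this file.
 * "kgd" and "mem" stand for handles the caller already owns; the point
 * is that the wait is interruptible and mem->lock is not held while
 * sleeping, because the function above waits on a cloned sync object.
 */
#if 0	/* illustrative sketch only */
static int example_wait_for_pt_updates(struct kgd_dev *kgd,
				       struct kgd_mem *mem)
{
	/* Interruptible wait; -ERESTARTSYS means a signal arrived */
	return amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
}
#endif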
1590
1591 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1592                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1593 {
1594         int ret;
1595         struct amdgpu_bo *bo = mem->bo;
1596
1597         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1598                 pr_err("userptr can't be mapped to kernel\n");
1599                 return -EINVAL;
1600         }
1601
1602         /* Delete kgd_mem from kfd_bo_list to avoid re-validating
1603          * this BO during its restore after eviction.
1604          */
1605         mutex_lock(&mem->process_info->lock);
1606
1607         ret = amdgpu_bo_reserve(bo, true);
1608         if (ret) {
1609                 pr_err("Failed to reserve bo. ret %d\n", ret);
1610                 goto bo_reserve_failed;
1611         }
1612
1613         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1614         if (ret) {
1615                 pr_err("Failed to pin bo. ret %d\n", ret);
1616                 goto pin_failed;
1617         }
1618
1619         ret = amdgpu_bo_kmap(bo, kptr);
1620         if (ret) {
1621                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1622                 goto kmap_failed;
1623         }
1624
1625         amdgpu_amdkfd_remove_eviction_fence(
1626                 bo, mem->process_info->eviction_fence);
1627         list_del_init(&mem->validate_list.head);
1628
1629         if (size)
1630                 *size = amdgpu_bo_size(bo);
1631
1632         amdgpu_bo_unreserve(bo);
1633
1634         mutex_unlock(&mem->process_info->lock);
1635         return 0;
1636
1637 kmap_failed:
1638         amdgpu_bo_unpin(bo);
1639 pin_failed:
1640         amdgpu_bo_unreserve(bo);
1641 bo_reserve_failed:
1642         mutex_unlock(&mem->process_info->lock);
1643
1644         return ret;
1645 }
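
/* Editor's note: the inverse teardown is not part of this section; a
 * minimal sketch of the expected unwind order, assuming the standard
 * amdgpu_bo helpers (amdgpu_bo_kunmap() from amdgpu_object.h):
 */
#if 0	/* illustrative sketch only */
static void example_unmap_gtt_bo_from_kernel(struct amdgpu_bo *bo)
{
	if (amdgpu_bo_reserve(bo, true))
		return;
	amdgpu_bo_kunmap(bo);	/* undo amdgpu_bo_kmap() */
	amdgpu_bo_unpin(bo);	/* undo amdgpu_bo_pin() */
	amdgpu_bo_unreserve(bo);
}
#endif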
1646
1647 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1648                                               struct kfd_vm_fault_info *mem)
1649 {
1650         struct amdgpu_device *adev;
1651
1652         adev = (struct amdgpu_device *)kgd;
1653         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1654                 *mem = *adev->gmc.vm_fault_info;
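                /* Make sure the copy above completes before we clear the
                 * updated flag and let a new fault overwrite the info.
                 */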
1655                 mb();
1656                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1657         }
1658         return 0;
1659 }
1660
1661 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1662                                       struct dma_buf *dma_buf,
1663                                       uint64_t va, void *vm,
1664                                       struct kgd_mem **mem, uint64_t *size,
1665                                       uint64_t *mmap_offset)
1666 {
1667         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1668         struct drm_gem_object *obj;
1669         struct amdgpu_bo *bo;
1670         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1671
1672         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1673                 /* Can't handle non-graphics buffers */
1674                 return -EINVAL;
1675
1676         obj = dma_buf->priv;
1677         if (obj->dev->dev_private != adev)
1678                 /* Can't handle buffers from other devices */
1679                 return -EINVAL;
1680
1681         bo = gem_to_amdgpu_bo(obj);
1682         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1683                                     AMDGPU_GEM_DOMAIN_GTT)))
1684                 /* Only VRAM and GTT BOs are supported */
1685                 return -EINVAL;
1686
1687         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1688         if (!*mem)
1689                 return -ENOMEM;
1690
1691         if (size)
1692                 *size = amdgpu_bo_size(bo);
1693
1694         if (mmap_offset)
1695                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1696
1697         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1698         mutex_init(&(*mem)->lock);
1699
1700         (*mem)->alloc_flags =
1701                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1702                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1703                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1704                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1705
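        /* Keep a GEM reference on the BO for the lifetime of this kgd_mem */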
1706         drm_gem_object_get(&bo->tbo.base);
1707         (*mem)->bo = bo;
1708         (*mem)->va = va;
1709         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1710                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1711         (*mem)->mapped_to_gpu_memory = 0;
1712         (*mem)->process_info = avm->process_info;
1713         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1714         amdgpu_sync_create(&(*mem)->sync);
1715         (*mem)->is_imported = true;
1716
1717         return 0;
1718 }
1719
1720 /* Evict a userptr BO by stopping the queues if necessary
1721  *
1722  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1723  * cannot do any memory allocations, and cannot take any locks that
1724  * are held elsewhere while allocating memory. Therefore this is as
1725  * simple as possible, using atomic counters.
1726  *
1727  * It doesn't do anything to the BO itself. The real work happens in
1728  * restore, where we get updated page addresses. This function only
1729  * ensures that GPU access to the BO is stopped.
1730  */
1731 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1732                                 struct mm_struct *mm)
1733 {
1734         struct amdkfd_process_info *process_info = mem->process_info;
1735         int evicted_bos;
1736         int r = 0;
1737
1738         atomic_inc(&mem->invalid);
1739         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1740         if (evicted_bos == 1) {
1741                 /* First eviction, stop the queues */
1742                 r = kgd2kfd_quiesce_mm(mm);
1743                 if (r)
1744                         pr_err("Failed to quiesce KFD\n");
1745                 schedule_delayed_work(&process_info->restore_userptr_work,
1746                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1747         }
1748
1749         return r;
1750 }
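
/* Editor's note: an illustrative sketch of the caller side, which lives
 * in the amdgpu MMU notifier path; every name here except
 * amdgpu_amdkfd_evict_userptr() is hypothetical.
 */
#if 0	/* illustrative sketch only */
static void example_notifier_invalidate(struct kgd_mem *mem,
					struct mm_struct *mm)
{
	/* May run in RECLAIM_FS context: no allocations here, which is
	 * why evict_userptr() only bumps counters and schedules work.
	 */
	amdgpu_amdkfd_evict_userptr(mem, mm);
}
#endif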
1751
1752 /* Update invalid userptr BOs
1753  *
1754  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1755  * userptr_inval_list and updates user pages for all BOs that have
1756  * been invalidated since their last update.
1757  */
1758 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1759                                      struct mm_struct *mm)
1760 {
1761         struct kgd_mem *mem, *tmp_mem;
1762         struct amdgpu_bo *bo;
1763         struct ttm_operation_ctx ctx = { false, false };
1764         int invalid, ret;
1765
1766         /* Move all invalidated BOs to the userptr_inval_list and
1767          * release their user pages by migrating them to the CPU domain.
1768          */
1769         list_for_each_entry_safe(mem, tmp_mem,
1770                                  &process_info->userptr_valid_list,
1771                                  validate_list.head) {
1772                 if (!atomic_read(&mem->invalid))
1773                         continue; /* BO is still valid */
1774
1775                 bo = mem->bo;
1776
1777                 if (amdgpu_bo_reserve(bo, true))
1778                         return -EAGAIN;
1779                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1780                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1781                 amdgpu_bo_unreserve(bo);
1782                 if (ret) {
1783                         pr_err("%s: Failed to invalidate userptr BO\n",
1784                                __func__);
1785                         return -EAGAIN;
1786                 }
1787
1788                 list_move_tail(&mem->validate_list.head,
1789                                &process_info->userptr_inval_list);
1790         }
1791
1792         if (list_empty(&process_info->userptr_inval_list))
1793                 return 0; /* All evicted userptr BOs were freed */
1794
1795         /* Go through userptr_inval_list and update any invalid user_pages */
1796         list_for_each_entry(mem, &process_info->userptr_inval_list,
1797                             validate_list.head) {
1798                 invalid = atomic_read(&mem->invalid);
1799                 if (!invalid)
1800                         /* BO hasn't been invalidated since the last
1801                          * revalidation attempt. Keep its BO list.
1802                          */
1803                         continue;
1804
1805                 bo = mem->bo;
1806
1807                 /* Get updated user pages */
1808                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1809                 if (ret) {
1810                         pr_debug("%s: Failed to get user pages: %d\n",
1811                                 __func__, ret);
1812
1813                         /* Return -EBUSY or -ENOMEM; the restore will be retried */
1814                         return ret;
1815                 }
1816
1817                 /*
1818                  * FIXME: Cannot ignore the return code, must hold
1819                  * notifier_lock
1820                  */
1821                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1822
1823                 /* Mark the BO as valid unless it was invalidated
1824                  * again concurrently.
1825                  */
1826                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1827                         return -EAGAIN;
1828         }
1829
1830         return 0;
1831 }
1832
1833 /* Validate invalid userptr BOs
1834  *
1835  * Validates BOs on the userptr_inval_list, and moves them back to the
1836  * userptr_valid_list. Also updates GPUVM page tables with new page
1837  * addresses and waits for the page table updates to complete.
1838  */
1839 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1840 {
1841         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1842         struct list_head resv_list, duplicates;
1843         struct ww_acquire_ctx ticket;
1844         struct amdgpu_sync sync;
1845
1846         struct amdgpu_vm *peer_vm;
1847         struct kgd_mem *mem, *tmp_mem;
1848         struct amdgpu_bo *bo;
1849         struct ttm_operation_ctx ctx = { false, false };
1850         int i, ret;
1851
1852         pd_bo_list_entries = kcalloc(process_info->n_vms,
1853                                      sizeof(struct amdgpu_bo_list_entry),
1854                                      GFP_KERNEL);
1855         if (!pd_bo_list_entries) {
1856                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1857                 ret = -ENOMEM;
1858                 goto out_no_mem;
1859         }
1860
1861         INIT_LIST_HEAD(&resv_list);
1862         INIT_LIST_HEAD(&duplicates);
1863
1864         /* Get all the page directory BOs that need to be reserved */
1865         i = 0;
1866         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1867                             vm_list_node)
1868                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1869                                     &pd_bo_list_entries[i++]);
1870         /* Add the userptr_inval_list entries to resv_list */
1871         list_for_each_entry(mem, &process_info->userptr_inval_list,
1872                             validate_list.head) {
1873                 list_add_tail(&mem->resv_list.head, &resv_list);
1874                 mem->resv_list.bo = mem->validate_list.bo;
1875                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1876         }
1877
1878         /* Reserve all BOs and page tables for validation */
1879         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1880         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1881         if (ret)
1882                 goto out_free;
1883
1884         amdgpu_sync_create(&sync);
1885
1886         ret = process_validate_vms(process_info);
1887         if (ret)
1888                 goto unreserve_out;
1889
1890         /* Validate BOs and update GPUVM page tables */
1891         list_for_each_entry_safe(mem, tmp_mem,
1892                                  &process_info->userptr_inval_list,
1893                                  validate_list.head) {
1894                 struct kfd_bo_va_list *bo_va_entry;
1895
1896                 bo = mem->bo;
1897
1898                 /* Validate the BO if we got user pages */
1899                 if (bo->tbo.ttm->pages[0]) {
1900                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1901                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1902                         if (ret) {
1903                                 pr_err("%s: failed to validate BO\n", __func__);
1904                                 goto unreserve_out;
1905                         }
1906                 }
1907
1908                 list_move_tail(&mem->validate_list.head,
1909                                &process_info->userptr_valid_list);
1910
1911                 /* Update mapping. If the BO was not validated
1912                  * (because we couldn't get user pages), this will
1913                  * clear the page table entries, which will result in
1914                  * VM faults if the GPU tries to access the invalid
1915                  * memory.
1916                  */
1917                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1918                         if (!bo_va_entry->is_mapped)
1919                                 continue;
1920
1921                         ret = update_gpuvm_pte((struct amdgpu_device *)
1922                                                bo_va_entry->kgd_dev,
1923                                                bo_va_entry, &sync);
1924                         if (ret) {
1925                                 pr_err("%s: update PTE failed\n", __func__);
1926                                 /* make sure this gets validated again */
1927                                 atomic_inc(&mem->invalid);
1928                                 goto unreserve_out;
1929                         }
1930                 }
1931         }
1932
1933         /* Update page directories */
1934         ret = process_update_pds(process_info, &sync);
1935
1936 unreserve_out:
1937         ttm_eu_backoff_reservation(&ticket, &resv_list);
1938         amdgpu_sync_wait(&sync, false);
1939         amdgpu_sync_free(&sync);
1940 out_free:
1941         kfree(pd_bo_list_entries);
1942 out_no_mem:
1943
1944         return ret;
1945 }
1946
1947 /* Worker callback to restore evicted userptr BOs
1948  *
1949  * Tries to update and validate all userptr BOs. If successful and no
1950  * concurrent evictions happened, the queues are restarted. Otherwise,
1951  * reschedule for another attempt later.
1952  */
1953 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1954 {
1955         struct delayed_work *dwork = to_delayed_work(work);
1956         struct amdkfd_process_info *process_info =
1957                 container_of(dwork, struct amdkfd_process_info,
1958                              restore_userptr_work);
1959         struct task_struct *usertask;
1960         struct mm_struct *mm;
1961         int evicted_bos;
1962
1963         evicted_bos = atomic_read(&process_info->evicted_bos);
1964         if (!evicted_bos)
1965                 return;
1966
1967         /* Reference task and mm in case of concurrent process termination */
1968         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1969         if (!usertask)
1970                 return;
1971         mm = get_task_mm(usertask);
1972         if (!mm) {
1973                 put_task_struct(usertask);
1974                 return;
1975         }
1976
1977         mutex_lock(&process_info->lock);
1978
1979         if (update_invalid_user_pages(process_info, mm))
1980                 goto unlock_out;
1981         /* userptr_inval_list can be empty if all evicted userptr BOs
1982          * have been freed. In that case there is nothing to validate
1983          * and we can just restart the queues.
1984          */
1985         if (!list_empty(&process_info->userptr_inval_list)) {
1986                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1987                         goto unlock_out; /* Concurrent eviction, try again */
1988
1989                 if (validate_invalid_user_pages(process_info))
1990                         goto unlock_out;
1991         }
1992         /* Final check for concurrent eviction and atomic update. If
1993          * another eviction happens after the successful update, it will
1994          * count as a first eviction that calls quiesce_mm. The eviction
1995          * reference counting inside KFD will handle this case.
1996          */
1997         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1998             evicted_bos)
1999                 goto unlock_out;
2000         evicted_bos = 0;
2001         if (kgd2kfd_resume_mm(mm)) {
2002                 pr_err("%s: Failed to resume KFD\n", __func__);
2003                 /* No recovery from this failure. Probably the CP is
2004                  * hanging. No point trying again.
2005                  */
2006         }
2007
2008 unlock_out:
2009         mutex_unlock(&process_info->lock);
2010         mmput(mm);
2011         put_task_struct(usertask);
2012
2013         /* If validation failed, reschedule another attempt */
2014         if (evicted_bos)
2015                 schedule_delayed_work(&process_info->restore_userptr_work,
2016                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2017 }
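
/* Editor's note: the evict/restore handshake above, reduced to a
 * hypothetical helper to show why the cmpxchg is needed: restore may
 * declare success only if no eviction raced in since it sampled the
 * counter.
 */
#if 0	/* illustrative sketch only */
static bool example_try_finish_restore(atomic_t *evicted_bos, int seen)
{
	/* Swing the counter back to 0 only if it is still "seen" */
	return atomic_cmpxchg(evicted_bos, seen, 0) == seen;
}
#endif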
2018
2019 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2020  *   KFD process identified by process_info
2021  *
2022  * @process_info: amdkfd_process_info of the KFD process
2023  *
2024  * After a memory eviction, the restore thread calls this function. It
2025  * should be called while the process is still valid. BO restore involves:
2026  *
2027  * 1.  Release the old eviction fence and create a new one
2028  * 2.  Get two copies of the PD BO list from all the VMs. Keep one as pd_list.
2029  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2030  *     BOs that need to be reserved
2031  * 4.  Reserve all the BOs
2032  * 5.  Validate PD and PT BOs
2033  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2034  * 7.  Add the fence to all PD and PT BOs
2035  * 8.  Unreserve all BOs
2036  */
2037 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2038 {
2039         struct amdgpu_bo_list_entry *pd_bo_list;
2040         struct amdkfd_process_info *process_info = info;
2041         struct amdgpu_vm *peer_vm;
2042         struct kgd_mem *mem;
2043         struct bo_vm_reservation_context ctx;
2044         struct amdgpu_amdkfd_fence *new_fence;
2045         int ret = 0, i;
2046         struct list_head duplicate_save;
2047         struct amdgpu_sync sync_obj;
2048
2049         INIT_LIST_HEAD(&duplicate_save);
2050         INIT_LIST_HEAD(&ctx.list);
2051         INIT_LIST_HEAD(&ctx.duplicates);
2052
2053         pd_bo_list = kcalloc(process_info->n_vms,
2054                              sizeof(struct amdgpu_bo_list_entry),
2055                              GFP_KERNEL);
2056         if (!pd_bo_list)
2057                 return -ENOMEM;
2058
2059         i = 0;
2060         mutex_lock(&process_info->lock);
2061         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2062                         vm_list_node)
2063                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2064
2065         /* Reserve all BOs and page tables/directory. Add all BOs from
2066          * kfd_bo_list to ctx.list
2067          */
2068         list_for_each_entry(mem, &process_info->kfd_bo_list,
2069                             validate_list.head) {
2070
2071                 list_add_tail(&mem->resv_list.head, &ctx.list);
2072                 mem->resv_list.bo = mem->validate_list.bo;
2073                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2074         }
2075
2076         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2077                                      false, &duplicate_save);
2078         if (ret) {
2079                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2080                 goto ttm_reserve_fail;
2081         }
2082
2083         amdgpu_sync_create(&sync_obj);
2084
2085         /* Validate PDs and PTs */
2086         ret = process_validate_vms(process_info);
2087         if (ret)
2088                 goto validate_map_fail;
2089
2090         ret = process_sync_pds_resv(process_info, &sync_obj);
2091         if (ret) {
2092                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2093                 goto validate_map_fail;
2094         }
2095
2096         /* Validate BOs and map them to GPUVM (update VM page tables). */
2097         list_for_each_entry(mem, &process_info->kfd_bo_list,
2098                             validate_list.head) {
2099
2100                 struct amdgpu_bo *bo = mem->bo;
2101                 uint32_t domain = mem->domain;
2102                 struct kfd_bo_va_list *bo_va_entry;
2103
2104                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2105                 if (ret) {
2106                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2107                         goto validate_map_fail;
2108                 }
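                /* Record the buffer-move fence (if validation relocated
                 * the BO) so the final sync wait below also covers the
                 * move.
                 */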
2109                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2110                 if (ret) {
2111                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2112                         goto validate_map_fail;
2113                 }
2114                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2115                                     bo_list) {
2116                         ret = update_gpuvm_pte((struct amdgpu_device *)
2117                                               bo_va_entry->kgd_dev,
2118                                               bo_va_entry,
2119                                               &sync_obj);
2120                         if (ret) {
2121                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2122                                 goto validate_map_fail;
2123                         }
2124                 }
2125         }
2126
2127         /* Update page directories */
2128         ret = process_update_pds(process_info, &sync_obj);
2129         if (ret) {
2130                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2131                 goto validate_map_fail;
2132         }
2133
2134         /* Wait for validate and PT updates to finish */
2135         amdgpu_sync_wait(&sync_obj, false);
2136
2137         /* Release the old eviction fence and create a new one. Because a
2138          * fence only goes from unsignaled to signaled, it cannot be
2139          * reused. Use the context and mm from the old fence.
2140          */
2141         new_fence = amdgpu_amdkfd_fence_create(
2142                                 process_info->eviction_fence->base.context,
2143                                 process_info->eviction_fence->mm);
2144         if (!new_fence) {
2145                 pr_err("Failed to create eviction fence\n");
2146                 ret = -ENOMEM;
2147                 goto validate_map_fail;
2148         }
2149         dma_fence_put(&process_info->eviction_fence->base);
2150         process_info->eviction_fence = new_fence;
2151         *ef = dma_fence_get(&new_fence->base);
2152
2153         /* Attach new eviction fence to all BOs */
2154         list_for_each_entry(mem, &process_info->kfd_bo_list,
2155                 validate_list.head)
2156                 amdgpu_bo_fence(mem->bo,
2157                         &process_info->eviction_fence->base, true);
2158
2159         /* Attach eviction fence to PD / PT BOs */
2160         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2161                             vm_list_node) {
2162                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2163
2164                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2165         }
2166
2167 validate_map_fail:
2168         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2169         amdgpu_sync_free(&sync_obj);
2170 ttm_reserve_fail:
2171         mutex_unlock(&process_info->lock);
2172         kfree(pd_bo_list);
2173         return ret;
2174 }
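
/* Editor's note: an illustrative sketch of fence handover on the caller
 * side; "old_ef" and the helper name are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_restore_and_swap_fence(void *process_info,
					  struct dma_fence **old_ef)
{
	struct dma_fence *ef = NULL;
	int ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);

	if (!ret) {
		dma_fence_put(*old_ef);	/* drop the old eviction fence */
		*old_ef = ef;		/* keep the reference returned above */
	}
	return ret;
}
#endif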
2175
2176 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2177 {
2178         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2179         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2180         int ret;
2181
2182         if (!info || !gws)
2183                 return -EINVAL;
2184
2185         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2186         if (!*mem)
2187                 return -ENOMEM;
2188
2189         mutex_init(&(*mem)->lock);
2190         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2191         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2192         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2193         (*mem)->process_info = process_info;
2194         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2195         amdgpu_sync_create(&(*mem)->sync);
2196
2197
2198         /* Validate gws bo the first time it is added to process */
2199         mutex_lock(&(*mem)->process_info->lock);
2200         ret = amdgpu_bo_reserve(gws_bo, false);
2201         if (unlikely(ret)) {
2202                 pr_err("Reserve gws bo failed %d\n", ret);
2203                 goto bo_reservation_failure;
2204         }
2205
2206         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2207         if (ret) {
2208                 pr_err("GWS BO validate failed %d\n", ret);
2209                 goto bo_validation_failure;
2210         }
2211         /* The GWS resource is shared between amdgpu and amdkfd.
2212          * Add the process eviction fence to the BO so they can
2213          * evict each other.
2214          */
2215         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2216         if (ret)
2217                 goto reserve_shared_fail;
2218         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2219         amdgpu_bo_unreserve(gws_bo);
2220         mutex_unlock(&(*mem)->process_info->lock);
2221
2222         return ret;
2223
2224 reserve_shared_fail:
2225 bo_validation_failure:
2226         amdgpu_bo_unreserve(gws_bo);
2227 bo_reservation_failure:
2228         mutex_unlock(&(*mem)->process_info->lock);
2229         amdgpu_sync_free(&(*mem)->sync);
2230         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2231         amdgpu_bo_unref(&gws_bo);
2232         mutex_destroy(&(*mem)->lock);
2233         kfree(*mem);
2234         *mem = NULL;
2235         return ret;
2236 }
2237
2238 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2239 {
2240         int ret;
2241         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2242         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2243         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2244
2245         /* Remove the BO from the process's validate list so the restore
2246          * worker won't touch it anymore.
2247          */
2248         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2249
2250         ret = amdgpu_bo_reserve(gws_bo, false);
2251         if (unlikely(ret)) {
2252                 pr_err("Reserve gws bo failed %d\n", ret);
2253                 /* TODO: add BO back to validate_list? */
2254                 return ret;
2255         }
2256         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2257                         process_info->eviction_fence);
2258         amdgpu_bo_unreserve(gws_bo);
2259         amdgpu_sync_free(&kgd_mem->sync);
2260         amdgpu_bo_unref(&gws_bo);
2261         mutex_destroy(&kgd_mem->lock);
2262         kfree(mem);
2263         return 0;
2264 }
2265
2266 /* Returns GPU-specific tiling mode information */
2267 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2268                                 struct tile_config *config)
2269 {
2270         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2271
2272         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2273         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2274         config->num_tile_configs =
2275                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2276         config->macro_tile_config_ptr =
2277                         adev->gfx.config.macrotile_mode_array;
2278         config->num_macro_tile_configs =
2279                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2280
2281         /* Those values are not set from GFX9 onwards */
2282         config->num_banks = adev->gfx.config.num_banks;
2283         config->num_ranks = adev->gfx.config.num_ranks;
2284
2285         return 0;
2286 }
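
/* Editor's note: a hypothetical consumer of the tile configuration
 * returned above, for illustration only.
 */
#if 0	/* illustrative sketch only */
static void example_dump_tile_config(struct kgd_dev *kgd)
{
	struct tile_config cfg;
	unsigned int i;

	if (amdgpu_amdkfd_get_tile_config(kgd, &cfg))
		return;

	for (i = 0; i < cfg.num_tile_configs; i++)
		pr_debug("tile mode %u: 0x%08x\n", i, cfg.tile_config_ptr[i]);
}
#endif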