/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"

/* Special VM and GART address alignment needed for VI pre-Fiji due to
 * a HW bug.
 */
#define VI_BO_SIZE_ALIGN (0x8000)

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t	domain;
	bool		wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);


static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

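/* check_if_add_bo_to_vm - Check whether a BO still needs to be added
 * to a given VM
 *
 * Returns true if no bo_va for @avm exists on the BO's bo_va_list yet,
 * i.e. the BO has not been added to this VM before.
 */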
static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are
 *  System (TTM + userptr) memory - 3/4th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

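/* amdgpu_amdkfd_reserve_mem_limit - Account a new allocation against
 * the KFD memory limits
 *
 * Checks the requested size plus TTM's accounting overhead against the
 * system, TTM and VRAM limits and, if all checks pass, commits the new
 * usage under the mem_limit_lock. VRAM is checked against the real
 * VRAM size minus an amount set aside for page tables.
 *
 * Returns 0 on success, -ENOMEM if any limit would be exceeded.
 */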
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

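/* unreserve_mem_limit - Release accounting done by
 * amdgpu_amdkfd_reserve_mem_limit
 *
 * Must mirror the domain/sg classification used at reserve time so
 * that the accounting stays balanced.
 */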
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

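/* amdgpu_amdkfd_unreserve_memory_limit - Release memory accounting for
 * a BO that is being freed
 *
 * Derives the domain and sg classification from the BO itself. Userptr
 * BOs are identified by the AMDGPU_AMDKFD_USERPTR_BO flag and accounted
 * as CPU-domain allocations.
 */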
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}


/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct reservation_object *resv = bo->tbo.resv;
	struct reservation_object_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = reservation_object_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      reservation_object_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      reservation_object_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

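/* amdgpu_amdkfd_bo_validate - Move a BO into the requested domain
 *
 * Must not be called with userptr BOs, which are validated by the
 * restore worker instead. If @wait is true, also waits for the
 * validation to complete before returning.
 */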
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

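/* vm_update_pds - Update the page directories of a VM and add the
 * resulting fence to the sync object
 */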
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_directories(adev, vm);
	if (ret)
		return ret;

	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}

/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a.  Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev,
							 mem->mapping_flags);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate and validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

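/* add_kgd_mem_to_kfd_bo_list - Add a kgd_mem to the process BO lists
 *
 * The BO goes on userptr_valid_list for userptr BOs or on kfd_bo_list
 * otherwise, so that the eviction/restore workers can find it. Takes
 * the process_info->lock.
 */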
static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;                 /* Number of VMs reserved       */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;       /* Reservation ticket           */
	struct list_head list, duplicates;  /* BO lists                     */
	struct amdgpu_sync *sync;           /* Pointer to sync object       */
	bool reserved;                      /* Whether BOs are reserved     */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates, true);
	if (!ret) {
		ctx->reserved = true;
	} else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, all VMs associated with the BO
 * are reserved. Otherwise, only the given VM is reserved.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				struct amdgpu_vm *vm, enum bo_vm_match map_type,
				struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates, true);
	if (ret) {
		pr_err("Failed to reserve buffers in ttm.\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	} else {
		ctx->reserved = true;
	}

	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

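/* unmap_bo_from_gpuvm - Remove a BO mapping from a GPUVM
 *
 * Removes the VA mapping, clears the freed mapping from the page
 * tables and adds the page table update fence to the sync object.
 */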
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);

	return 0;
}

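/* update_gpuvm_pte - Update the page table entries of a mapping and
 * add the page table update fence to the sync object
 */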
static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}

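/* map_bo_to_gpuvm - Map a BO at its assigned VA in a GPUVM
 *
 * Creates the VA mapping and, unless @no_update_pte is set, updates
 * the page tables. On PTE update failure the mapping is removed again.
 */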
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

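/* create_doorbell_sg - Create a one-entry SG table pointing at a
 * doorbell or MMIO DMA address
 *
 * Used for SG BOs that remap doorbells or the MMIO page into a GPUVM.
 */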
static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}

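/* process_validate_vms - Validate the page table BOs of all VMs
 * belonging to a KFD process
 */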
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

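/* process_sync_pds_resv - Add the fences of all page directory
 * reservation objects of a process to a sync object
 */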
static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL, sync, pd->tbo.resv,
				       AMDGPU_FENCE_OWNER_UNDEFINED, false);
		if (ret)
			return ret;
	}

	return 0;
}

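/* process_update_pds - Update the page directories of all VMs
 * belonging to a KFD process
 */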
static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

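/* init_kfd_vm - Initialize the KFD part of a VM
 *
 * Creates the per-process info structure (including the eviction
 * fence) on first use, validates the page directory, attaches the
 * eviction fence to the PD and adds the VM to the process VM list.
 */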
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original PASID of the amdgpu VM was already released when
	 * the VM was converted to a compute VM. The current PASID is
	 * managed by KFD and will be released on KFD process destruction.
	 * Set the amdgpu PASID to 0 to avoid a duplicate release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

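/* amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu - Allocate memory on behalf
 * of a KFD process
 *
 * Picks the allocation domain and flags from @flags (VRAM, GTT,
 * userptr, doorbell or MMIO remap), reserves the size against the KFD
 * memory limits, creates the BO and adds it to the process BO list.
 * For userptr BOs the user pages are initialized here as well.
 */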
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int byte_align;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	uint32_t mapping_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = *offset;
	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	/* Workaround for TLB bug on older VI chips */
	byte_align = (adev->family == AMDGPU_FAMILY_VI &&
			adev->asic_type != CHIP_FIJI &&
			adev->asic_type != CHIP_POLARIS10 &&
			adev->asic_type != CHIP_POLARIS11 &&
			adev->asic_type != CHIP_POLARIS12) ?
			VI_BO_SIZE_ALIGN : 1;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
	if (flags & ALLOC_MEM_FLAGS_COHERENT)
		mapping_flags |= AMDGPU_VM_MTYPE_UC;
	else
		mapping_flags |= AMDGPU_VM_MTYPE_NC;
	(*mem)->mapping_flags = mapping_flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = byte_align;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap_sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

1434 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1435                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1436 {
1437         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1438         struct amdkfd_process_info *process_info =
1439                 ((struct amdgpu_vm *)vm)->process_info;
1440         unsigned long bo_size = mem->bo->tbo.mem.size;
1441         struct kfd_bo_va_list *entry;
1442         struct bo_vm_reservation_context ctx;
1443         int ret;
1444
1445         mutex_lock(&mem->lock);
1446
1447         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1448         if (unlikely(ret))
1449                 goto out;
1450         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1451         if (ctx.n_vms == 0) {
1452                 ret = -EINVAL;
1453                 goto unreserve_out;
1454         }
1455
1456         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1457         if (unlikely(ret))
1458                 goto unreserve_out;
1459
1460         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1461                 mem->va,
1462                 mem->va + bo_size * (1 + mem->aql_queue),
1463                 vm);
1464
1465         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1466                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1467                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1468                                         entry->va,
1469                                         entry->va + bo_size,
1470                                         entry);
1471
1472                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1473                         if (ret == 0) {
1474                                 entry->is_mapped = false;
1475                         } else {
1476                                 pr_err("failed to unmap VA 0x%llx\n",
1477                                                 mem->va);
1478                                 goto unreserve_out;
1479                         }
1480
1481                         mem->mapped_to_gpu_memory--;
1482                         pr_debug("\t DEC mapping count %d\n",
1483                                         mem->mapped_to_gpu_memory);
1484                 }
1485         }
1486
1487         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1488          * required.
1489          */
1490         if (mem->mapped_to_gpu_memory == 0 &&
1491             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
1492                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1493                                                 process_info->eviction_fence);
1494
1495 unreserve_out:
1496         unreserve_bo_and_vms(&ctx, false, false);
1497 out:
1498         mutex_unlock(&mem->lock);
1499         return ret;
1500 }
1501
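/*
 * Illustrative sketch (not part of the driver): a KFD caller pairs the
 * unmap entry point above with the matching map entry point defined
 * earlier in this file. Here `kgd`, `mem` and `vm` are placeholder
 * handles obtained from the corresponding device, allocation and
 * VM-acquire calls.
 *
 *      ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *      if (ret)
 *              goto err;
 *      ... GPU work using the mapping happens here ...
 *      ret = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 */
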
1502 int amdgpu_amdkfd_gpuvm_sync_memory(
1503                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1504 {
1505         struct amdgpu_sync sync;
1506         int ret;
1507
1508         amdgpu_sync_create(&sync);
1509
1510         mutex_lock(&mem->lock);
1511         amdgpu_sync_clone(&mem->sync, &sync);
1512         mutex_unlock(&mem->lock);
1513
1514         ret = amdgpu_sync_wait(&sync, intr);
1515         amdgpu_sync_free(&sync);
1516         return ret;
1517 }
1518
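/*
 * The function above clones mem->sync into a private amdgpu_sync
 * object under mem->lock and waits on the clone outside the lock, so a
 * long fence wait never blocks other operations on the BO.
 * Illustrative use, with `kgd` and `mem` as placeholder handles and an
 * interruptible wait:
 *
 *      ret = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *      if (ret)
 *              ... wait failed or was interrupted by a signal ...
 */
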
1519 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1520                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1521 {
1522         int ret;
1523         struct amdgpu_bo *bo = mem->bo;
1524
1525         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1526                 pr_err("userptr can't be mapped to kernel\n");
1527                 return -EINVAL;
1528         }
1529
1530         /* Remove kgd_mem from kfd_bo_list to avoid re-validating
1531          * this BO during the restore of BOs after an eviction.
1532          */
1533         mutex_lock(&mem->process_info->lock);
1534
1535         ret = amdgpu_bo_reserve(bo, true);
1536         if (ret) {
1537                 pr_err("Failed to reserve bo. ret %d\n", ret);
1538                 goto bo_reserve_failed;
1539         }
1540
1541         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1542         if (ret) {
1543                 pr_err("Failed to pin bo. ret %d\n", ret);
1544                 goto pin_failed;
1545         }
1546
1547         ret = amdgpu_bo_kmap(bo, kptr);
1548         if (ret) {
1549                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1550                 goto kmap_failed;
1551         }
1552
1553         amdgpu_amdkfd_remove_eviction_fence(
1554                 bo, mem->process_info->eviction_fence);
1555         list_del_init(&mem->validate_list.head);
1556
1557         if (size)
1558                 *size = amdgpu_bo_size(bo);
1559
1560         amdgpu_bo_unreserve(bo);
1561
1562         mutex_unlock(&mem->process_info->lock);
1563         return 0;
1564
1565 kmap_failed:
1566         amdgpu_bo_unpin(bo);
1567 pin_failed:
1568         amdgpu_bo_unreserve(bo);
1569 bo_reserve_failed:
1570         mutex_unlock(&mem->process_info->lock);
1571
1572         return ret;
1573 }
1574
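/*
 * Illustrative sketch for the kernel-mapping helper above: a caller
 * that needs CPU access to a GTT BO maps it once and then uses the
 * returned kernel pointer. `kgd` and `mem` are placeholder handles;
 * `kptr` and `size` are filled in by the call.
 *
 *      void *kptr;
 *      uint64_t size;
 *
 *      ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem,
 *                                                     &kptr, &size);
 *      if (!ret)
 *              memset(kptr, 0, size);
 */
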
1575 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1576                                               struct kfd_vm_fault_info *mem)
1577 {
1578         struct amdgpu_device *adev;
1579
1580         adev = (struct amdgpu_device *)kgd;
1581         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1582                 *mem = *adev->gmc.vm_fault_info;
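                /* Make sure the copy above completes before the flag is
                 * cleared below, so a new fault record cannot overwrite
                 * this one while it is still being read out.
                 */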
1583                 mb();
1584                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1585         }
1586         return 0;
1587 }
1588
1589 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1590                                       struct dma_buf *dma_buf,
1591                                       uint64_t va, void *vm,
1592                                       struct kgd_mem **mem, uint64_t *size,
1593                                       uint64_t *mmap_offset)
1594 {
1595         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1596         struct drm_gem_object *obj;
1597         struct amdgpu_bo *bo;
1598         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1599
1600         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1601                 /* Can't handle non-graphics buffers */
1602                 return -EINVAL;
1603
1604         obj = dma_buf->priv;
1605         if (obj->dev->dev_private != adev)
1606                 /* Can't handle buffers from other devices */
1607                 return -EINVAL;
1608
1609         bo = gem_to_amdgpu_bo(obj);
1610         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1611                                     AMDGPU_GEM_DOMAIN_GTT)))
1612                 /* Only VRAM and GTT BOs are supported */
1613                 return -EINVAL;
1614
1615         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1616         if (!*mem)
1617                 return -ENOMEM;
1618
1619         if (size)
1620                 *size = amdgpu_bo_size(bo);
1621
1622         if (mmap_offset)
1623                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1624
1625         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1626         mutex_init(&(*mem)->lock);
1627         (*mem)->mapping_flags =
1628                 AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
1629                 AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
1630
1631         (*mem)->bo = amdgpu_bo_ref(bo);
1632         (*mem)->va = va;
1633         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1634                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1635         (*mem)->mapped_to_gpu_memory = 0;
1636         (*mem)->process_info = avm->process_info;
1637         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1638         amdgpu_sync_create(&(*mem)->sync);
1639
1640         return 0;
1641 }
1642
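/*
 * Illustrative sketch for the dma-buf import above: importing a buffer
 * exported by the graphics side of the same device. `kgd`, `dmabuf`,
 * `va` and `vm` are placeholder inputs; on success the returned `mem`
 * behaves like any other KFD BO.
 *
 *      struct kgd_mem *mem;
 *      uint64_t size, mmap_offset;
 *
 *      ret = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, vm,
 *                                              &mem, &size,
 *                                              &mmap_offset);
 */
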
1643 /* Evict a userptr BO by stopping the queues if necessary
1644  *
1645  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1646  * cannot do any memory allocations, and cannot take any locks that
1647  * are held elsewhere while allocating memory. Therefore this is as
1648  * simple as possible, using atomic counters.
1649  *
1650  * It doesn't do anything to the BO itself. The real work happens in
1651  * restore, where we get updated page addresses. This function only
1652  * ensures that GPU access to the BO is stopped.
1653  */
1654 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1655                                 struct mm_struct *mm)
1656 {
1657         struct amdkfd_process_info *process_info = mem->process_info;
1658         int invalid, evicted_bos;
1659         int r = 0;
1660
1661         invalid = atomic_inc_return(&mem->invalid);
1662         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1663         if (evicted_bos == 1) {
1664                 /* First eviction, stop the queues */
1665                 r = kgd2kfd_quiesce_mm(mm);
1666                 if (r)
1667                         pr_err("Failed to quiesce KFD\n");
1668                 schedule_delayed_work(&process_info->restore_userptr_work,
1669                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1670         }
1671
1672         return r;
1673 }
1674
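/*
 * Illustrative sketch for the eviction helper above, with a
 * hypothetical MMU-notifier caller: `mem` is the kgd_mem registered
 * for the invalidated range and `mm` is the notifier's mm_struct.
 * Consistent with the RECLAIM_FS constraint described above, the call
 * allocates no memory and takes no sleeping locks.
 *
 *      r = amdgpu_amdkfd_evict_userptr(mem, mm);
 */
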
1675 /* Update invalid userptr BOs
1676  *
1677  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1678  * userptr_inval_list and updates user pages for all BOs that have
1679  * been invalidated since their last update.
1680  */
1681 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1682                                      struct mm_struct *mm)
1683 {
1684         struct kgd_mem *mem, *tmp_mem;
1685         struct amdgpu_bo *bo;
1686         struct ttm_operation_ctx ctx = { false, false };
1687         int invalid, ret;
1688
1689         /* Move all invalidated BOs to the userptr_inval_list and
1690          * release their user pages by migrating them to the CPU domain
1691          */
1692         list_for_each_entry_safe(mem, tmp_mem,
1693                                  &process_info->userptr_valid_list,
1694                                  validate_list.head) {
1695                 if (!atomic_read(&mem->invalid))
1696                         continue; /* BO is still valid */
1697
1698                 bo = mem->bo;
1699
1700                 if (amdgpu_bo_reserve(bo, true))
1701                         return -EAGAIN;
1702                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1703                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1704                 amdgpu_bo_unreserve(bo);
1705                 if (ret) {
1706                         pr_err("%s: Failed to invalidate userptr BO\n",
1707                                __func__);
1708                         return -EAGAIN;
1709                 }
1710
1711                 list_move_tail(&mem->validate_list.head,
1712                                &process_info->userptr_inval_list);
1713         }
1714
1715         if (list_empty(&process_info->userptr_inval_list))
1716                 return 0; /* All evicted userptr BOs were freed */
1717
1718         /* Go through userptr_inval_list and update any invalid user_pages */
1719         list_for_each_entry(mem, &process_info->userptr_inval_list,
1720                             validate_list.head) {
1721                 invalid = atomic_read(&mem->invalid);
1722                 if (!invalid)
1723                         /* BO hasn't been invalidated since the last
1724                          * revalidation attempt. Keep its BO list.
1725                          */
1726                         continue;
1727
1728                 bo = mem->bo;
1729
1730                 /* Get updated user pages */
1731                 ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
1732                                                    bo->tbo.ttm->pages);
1733                 if (ret) {
1734                         bo->tbo.ttm->pages[0] = NULL;
1735                         pr_info("%s: Failed to get user pages: %d\n",
1736                                 __func__, ret);
1737                         /* Pretend it succeeded. It will fail later
1738                          * with a VM fault if the GPU tries to access
1739                          * it. Better than hanging indefinitely with
1740                          * stalled user mode queues.
1741                          */
1742                 }
1743         }
1744
1745         return 0;
1746 }
1747
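/*
 * Informal sketch of the userptr BO life cycle implemented by the
 * helpers above and below:
 *
 *      userptr_valid_list
 *              --(MMU notifier bumps mem->invalid)-->
 *      userptr_inval_list
 *              --(user pages updated, BO validated, PTEs updated)-->
 *      userptr_valid_list
 *
 * An -EAGAIN from these helpers makes the restore worker give up for
 * now; the worker is rescheduled as long as evicted BOs remain.
 */
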
1748 /* Remove invalid userptr BOs from the HMM tracking list
1749  *
1750  * Stops HMM from tracking further userptr updates for these BOs.
1751  */
1752 static void untrack_invalid_user_pages(struct amdkfd_process_info *process_info)
1753 {
1754         struct kgd_mem *mem, *tmp_mem;
1755         struct amdgpu_bo *bo;
1756
1757         list_for_each_entry_safe(mem, tmp_mem,
1758                                  &process_info->userptr_inval_list,
1759                                  validate_list.head) {
1760                 bo = mem->bo;
1761                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1762         }
1763 }
1764
1765 /* Validate invalid userptr BOs
1766  *
1767  * Validates BOs on the userptr_inval_list, and moves them back to the
1768  * userptr_valid_list. Also updates GPUVM page tables with new page
1769  * addresses and waits for the page table updates to complete.
1770  */
1771 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1772 {
1773         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1774         struct list_head resv_list, duplicates;
1775         struct ww_acquire_ctx ticket;
1776         struct amdgpu_sync sync;
1777
1778         struct amdgpu_vm *peer_vm;
1779         struct kgd_mem *mem, *tmp_mem;
1780         struct amdgpu_bo *bo;
1781         struct ttm_operation_ctx ctx = { false, false };
1782         int i, ret;
1783
1784         pd_bo_list_entries = kcalloc(process_info->n_vms,
1785                                      sizeof(struct amdgpu_bo_list_entry),
1786                                      GFP_KERNEL);
1787         if (!pd_bo_list_entries) {
1788                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1789                 ret = -ENOMEM;
1790                 goto out_no_mem;
1791         }
1792
1793         INIT_LIST_HEAD(&resv_list);
1794         INIT_LIST_HEAD(&duplicates);
1795
1796         /* Get all the page directory BOs that need to be reserved */
1797         i = 0;
1798         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1799                             vm_list_node)
1800                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1801                                     &pd_bo_list_entries[i++]);
1802         /* Add the userptr_inval_list entries to resv_list */
1803         list_for_each_entry(mem, &process_info->userptr_inval_list,
1804                             validate_list.head) {
1805                 list_add_tail(&mem->resv_list.head, &resv_list);
1806                 mem->resv_list.bo = mem->validate_list.bo;
1807                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1808         }
1809
1810         /* Reserve all BOs and page tables for validation */
1811         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
1812                                      true);
1813         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1814         if (ret)
1815                 goto out_free;
1816
1817         amdgpu_sync_create(&sync);
1818
1819         ret = process_validate_vms(process_info);
1820         if (ret)
1821                 goto unreserve_out;
1822
1823         /* Validate BOs and update GPUVM page tables */
1824         list_for_each_entry_safe(mem, tmp_mem,
1825                                  &process_info->userptr_inval_list,
1826                                  validate_list.head) {
1827                 struct kfd_bo_va_list *bo_va_entry;
1828
1829                 bo = mem->bo;
1830
1831                 /* Validate the BO if we got user pages */
1832                 if (bo->tbo.ttm->pages[0]) {
1833                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1834                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1835                         if (ret) {
1836                                 pr_err("%s: failed to validate BO\n", __func__);
1837                                 goto unreserve_out;
1838                         }
1839                 }
1840
1841                 list_move_tail(&mem->validate_list.head,
1842                                &process_info->userptr_valid_list);
1843
1844                 /* Stop HMM from tracking the userptr updates. We don't
1845                  * check the return value for a concurrent CPU page table
1846                  * update because we will reschedule the restore worker if
1847                  * process_info->evicted_bos is updated.
1848                  */
1849                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1850
1851                 /* Update mapping. If the BO was not validated
1852                  * (because we couldn't get user pages), this will
1853                  * clear the page table entries, which will result in
1854                  * VM faults if the GPU tries to access the invalid
1855                  * memory.
1856                  */
1857                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1858                         if (!bo_va_entry->is_mapped)
1859                                 continue;
1860
1861                         ret = update_gpuvm_pte((struct amdgpu_device *)
1862                                                bo_va_entry->kgd_dev,
1863                                                bo_va_entry, &sync);
1864                         if (ret) {
1865                                 pr_err("%s: update PTE failed\n", __func__);
1866                                 /* make sure this gets validated again */
1867                                 atomic_inc(&mem->invalid);
1868                                 goto unreserve_out;
1869                         }
1870                 }
1871         }
1872
1873         /* Update page directories */
1874         ret = process_update_pds(process_info, &sync);
1875
1876 unreserve_out:
1877         ttm_eu_backoff_reservation(&ticket, &resv_list);
1878         amdgpu_sync_wait(&sync, false);
1879         amdgpu_sync_free(&sync);
1880 out_free:
1881         kfree(pd_bo_list_entries);
1882 out_no_mem:
1883
1884         return ret;
1885 }
1886
1887 /* Worker callback to restore evicted userptr BOs
1888  *
1889  * Tries to update and validate all userptr BOs. If successful and no
1890  * concurrent evictions happened, the queues are restarted. Otherwise,
1891  * reschedule for another attempt later.
1892  */
1893 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1894 {
1895         struct delayed_work *dwork = to_delayed_work(work);
1896         struct amdkfd_process_info *process_info =
1897                 container_of(dwork, struct amdkfd_process_info,
1898                              restore_userptr_work);
1899         struct task_struct *usertask;
1900         struct mm_struct *mm;
1901         int evicted_bos;
1902
1903         evicted_bos = atomic_read(&process_info->evicted_bos);
1904         if (!evicted_bos)
1905                 return;
1906
1907         /* Reference task and mm in case of concurrent process termination */
1908         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1909         if (!usertask)
1910                 return;
1911         mm = get_task_mm(usertask);
1912         if (!mm) {
1913                 put_task_struct(usertask);
1914                 return;
1915         }
1916
1917         mutex_lock(&process_info->lock);
1918
1919         if (update_invalid_user_pages(process_info, mm))
1920                 goto unlock_out;
1921         /* userptr_inval_list can be empty if all evicted userptr BOs
1922          * have been freed. In that case there is nothing to validate
1923          * and we can just restart the queues.
1924          */
1925         if (!list_empty(&process_info->userptr_inval_list)) {
1926                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1927                         goto unlock_out; /* Concurrent eviction, try again */
1928
1929                 if (validate_invalid_user_pages(process_info))
1930                         goto unlock_out;
1931         }
1932         /* Final check for concurrent eviction and atomic update. If
1933          * another eviction happens after the successful update, it will
1934          * count as a first eviction that calls quiesce_mm. The eviction
1935          * reference counting inside KFD will handle this case.
1936          */
1937         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
1938             evicted_bos)
1939                 goto unlock_out;
1940         evicted_bos = 0;
1941         if (kgd2kfd_resume_mm(mm)) {
1942                 pr_err("%s: Failed to resume KFD\n", __func__);
1943                 /* No recovery from this failure. Probably the CP is
1944                  * hanging. No point trying again.
1945                  */
1946         }
1947
1948 unlock_out:
1949         untrack_invalid_user_pages(process_info);
1950         mutex_unlock(&process_info->lock);
1951         mmput(mm);
1952         put_task_struct(usertask);
1953
1954         /* If validation failed, reschedule another attempt */
1955         if (evicted_bos)
1956                 schedule_delayed_work(&process_info->restore_userptr_work,
1957                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1958 }
1959
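/*
 * The worker above uses an optimistic scheme on
 * process_info->evicted_bos: read the counter, do the restore work,
 * then atomically reset it only if it is unchanged. In outline:
 *
 *      evicted_bos = atomic_read(&process_info->evicted_bos);
 *      ... update and validate userptr BOs ...
 *      if (atomic_cmpxchg(&process_info->evicted_bos,
 *                         evicted_bos, 0) != evicted_bos)
 *              ... a new eviction raced with us, try again later ...
 */
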
1960 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
1961  *   KFD process identified by process_info
1962  *
1963  * @process_info: amdkfd_process_info of the KFD process
1964  *
1965  * After memory eviction, the restore thread calls this function. It should
1966  * be called while the process is still valid. BO restore involves:
1967  *
1968  * 1.  Release the old eviction fence and create a new one
1969  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
1970  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
1971  *     BOs that need to be reserved.
1972  * 4.  Reserve all the BOs
1973  * 5.  Validate the PD and PT BOs.
1974  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
1975  * 7.  Add the fence to all PD and PT BOs.
1976  * 8.  Unreserve all BOs
1977  */
1978 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
1979 {
1980         struct amdgpu_bo_list_entry *pd_bo_list;
1981         struct amdkfd_process_info *process_info = info;
1982         struct amdgpu_vm *peer_vm;
1983         struct kgd_mem *mem;
1984         struct bo_vm_reservation_context ctx;
1985         struct amdgpu_amdkfd_fence *new_fence;
1986         int ret = 0, i;
1987         struct list_head duplicate_save;
1988         struct amdgpu_sync sync_obj;
1989
1990         INIT_LIST_HEAD(&duplicate_save);
1991         INIT_LIST_HEAD(&ctx.list);
1992         INIT_LIST_HEAD(&ctx.duplicates);
1993
1994         pd_bo_list = kcalloc(process_info->n_vms,
1995                              sizeof(struct amdgpu_bo_list_entry),
1996                              GFP_KERNEL);
1997         if (!pd_bo_list)
1998                 return -ENOMEM;
1999
2000         i = 0;
2001         mutex_lock(&process_info->lock);
2002         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2003                         vm_list_node)
2004                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2005
2006         /* Reserve all BOs and page tables/directory. Add all BOs from
2007          * kfd_bo_list to ctx.list
2008          */
2009         list_for_each_entry(mem, &process_info->kfd_bo_list,
2010                             validate_list.head) {
2011
2012                 list_add_tail(&mem->resv_list.head, &ctx.list);
2013                 mem->resv_list.bo = mem->validate_list.bo;
2014                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2015         }
2016
2017         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2018                                      false, &duplicate_save, true);
2019         if (ret) {
2020                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2021                 goto ttm_reserve_fail;
2022         }
2023
2024         amdgpu_sync_create(&sync_obj);
2025
2026         /* Validate PDs and PTs */
2027         ret = process_validate_vms(process_info);
2028         if (ret)
2029                 goto validate_map_fail;
2030
2031         ret = process_sync_pds_resv(process_info, &sync_obj);
2032         if (ret) {
2033                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2034                 goto validate_map_fail;
2035         }
2036
2037         /* Validate BOs and map them to GPUVM (update VM page tables). */
2038         list_for_each_entry(mem, &process_info->kfd_bo_list,
2039                             validate_list.head) {
2040
2041                 struct amdgpu_bo *bo = mem->bo;
2042                 uint32_t domain = mem->domain;
2043                 struct kfd_bo_va_list *bo_va_entry;
2044
2045                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2046                 if (ret) {
2047                         pr_debug("Memory eviction: Validate BOs failed. Try again\n");
2048                         goto validate_map_fail;
2049                 }
2050                 ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
2051                 if (ret) {
2052                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2053                         goto validate_map_fail;
2054                 }
2055                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2056                                     bo_list) {
2057                         ret = update_gpuvm_pte((struct amdgpu_device *)
2058                                               bo_va_entry->kgd_dev,
2059                                               bo_va_entry,
2060                                               &sync_obj);
2061                         if (ret) {
2062                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2063                                 goto validate_map_fail;
2064                         }
2065                 }
2066         }
2067
2068         /* Update page directories */
2069         ret = process_update_pds(process_info, &sync_obj);
2070         if (ret) {
2071                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2072                 goto validate_map_fail;
2073         }
2074
2075         /* Wait for validate and PT updates to finish */
2076         amdgpu_sync_wait(&sync_obj, false);
2077
2078         /* Release the old eviction fence and create a new one. A fence
2079          * only goes from unsignaled to signaled, so it cannot be reused.
2080          * Use the context and mm from the old fence.
2081          */
2082         new_fence = amdgpu_amdkfd_fence_create(
2083                                 process_info->eviction_fence->base.context,
2084                                 process_info->eviction_fence->mm);
2085         if (!new_fence) {
2086                 pr_err("Failed to create eviction fence\n");
2087                 ret = -ENOMEM;
2088                 goto validate_map_fail;
2089         }
2090         dma_fence_put(&process_info->eviction_fence->base);
2091         process_info->eviction_fence = new_fence;
2092         *ef = dma_fence_get(&new_fence->base);
2093
2094         /* Attach new eviction fence to all BOs */
2095         list_for_each_entry(mem, &process_info->kfd_bo_list,
2096                 validate_list.head)
2097                 amdgpu_bo_fence(mem->bo,
2098                         &process_info->eviction_fence->base, true);
2099
2100         /* Attach eviction fence to PD / PT BOs */
2101         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2102                             vm_list_node) {
2103                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2104
2105                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2106         }
2107
2108 validate_map_fail:
2109         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2110         amdgpu_sync_free(&sync_obj);
2111 ttm_reserve_fail:
2112         mutex_unlock(&process_info->lock);
2113         kfree(pd_bo_list);
2114         return ret;
2115 }
2116
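/*
 * Illustrative sketch for the restore entry point above: the caller
 * passes the process_info pointer it received at process creation
 * (`info`, a placeholder here) and keeps the new eviction fence.
 *
 *      struct dma_fence *ef;
 *
 *      ret = amdgpu_amdkfd_gpuvm_restore_process_bos(info, &ef);
 *      if (!ret)
 *              ... queues can be restarted; store ef for later ...
 */
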
2117 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2118 {
2119         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2120         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2121         int ret;
2122
2123         if (!info || !gws)
2124                 return -EINVAL;
2125
2126         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2127         if (!*mem)
2128                 return -ENOMEM;
2129
2130         mutex_init(&(*mem)->lock);
2131         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2132         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2133         (*mem)->process_info = process_info;
2134         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2135         amdgpu_sync_create(&(*mem)->sync);
2136
2137
2138         /* Validate the GWS BO the first time it is added to the process */
2139         mutex_lock(&(*mem)->process_info->lock);
2140         ret = amdgpu_bo_reserve(gws_bo, false);
2141         if (unlikely(ret)) {
2142                 pr_err("Reserve gws bo failed %d\n", ret);
2143                 goto bo_reservation_failure;
2144         }
2145
2146         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2147         if (ret) {
2148                 pr_err("GWS BO validate failed %d\n", ret);
2149                 goto bo_validation_failure;
2150         }
2151         /* The GWS resource is shared between amdgpu and amdkfd.
2152          * Add the process eviction fence to the BO so they can
2153          * evict each other.
2154          */
2155         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2156         amdgpu_bo_unreserve(gws_bo);
2157         mutex_unlock(&(*mem)->process_info->lock);
2158
2159         return ret;
2160
2161 bo_validation_failure:
2162         amdgpu_bo_unreserve(gws_bo);
2163 bo_reservation_failure:
2164         mutex_unlock(&(*mem)->process_info->lock);
2165         amdgpu_sync_free(&(*mem)->sync);
2166         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2167         amdgpu_bo_unref(&gws_bo);
2168         mutex_destroy(&(*mem)->lock);
2169         kfree(*mem);
2170         *mem = NULL;
2171         return ret;
2172 }
2173
2174 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2175 {
2176         int ret;
2177         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2178         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2179         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2180
2181         /* Remove the BO from the process's validate list so the restore
2182          * worker won't touch it anymore.
2183          */
2184         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2185
2186         ret = amdgpu_bo_reserve(gws_bo, false);
2187         if (unlikely(ret)) {
2188                 pr_err("Reserve gws bo failed %d\n", ret);
2189                 //TODO add BO back to validate_list?
2190                 return ret;
2191         }
2192         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2193                         process_info->eviction_fence);
2194         amdgpu_bo_unreserve(gws_bo);
2195         amdgpu_sync_free(&kgd_mem->sync);
2196         amdgpu_bo_unref(&gws_bo);
2197         mutex_destroy(&kgd_mem->lock);
2198         kfree(mem);
2199         return 0;
2200 }
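
/*
 * Illustrative sketch: the two GWS helpers above are paired per
 * process. `info` and `gws` are placeholder handles for the process
 * info and the device's GWS BO.
 *
 *      struct kgd_mem *gws_mem;
 *
 *      ret = amdgpu_amdkfd_add_gws_to_process(info, gws, &gws_mem);
 *      ...
 *      ret = amdgpu_amdkfd_remove_gws_from_process(info, gws_mem);
 */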