1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43         uint64_t max_system_mem_limit;
44         uint64_t max_ttm_mem_limit;
45         int64_t system_mem_used;
46         int64_t ttm_mem_used;
47         spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49
50 /* Struct used for amdgpu_amdkfd_bo_validate */
51 struct amdgpu_vm_parser {
52         uint32_t        domain;
53         bool            wait;
54 };
55
56 static const char * const domain_bit_to_string[] = {
57                 "CPU",
58                 "GTT",
59                 "VRAM",
60                 "GDS",
61                 "GWS",
62                 "OA"
63 };
64
65 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
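/* Example (illustrative, using the UAPI domain bits from amdgpu_drm.h:
 * CPU=0x1, GTT=0x2, VRAM=0x4, GDS=0x8, GWS=0x10, OA=0x20): ffs() returns
 * the 1-based index of the lowest set bit, so
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) -> ffs(0x4)-1 = 2 -> "VRAM".
 * A multi-bit mask resolves to its lowest set bit only.
 */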
66
67 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
68
69
70 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
71 {
72         return (struct amdgpu_device *)kgd;
73 }
74
75 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
76                 struct kgd_mem *mem)
77 {
78         struct kfd_bo_va_list *entry;
79
80         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
81                 if (entry->bo_va->base.vm == avm)
82                         return false;
83
84         return true;
85 }
86
87 /* Set memory usage limits. Currently, limits are
88  *  System (TTM + userptr) memory - 15/16th System RAM
89  *  TTM memory - 3/8th System RAM
90  */
91 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
92 {
93         struct sysinfo si;
94         uint64_t mem;
95
96         si_meminfo(&si);
97         mem = si.freeram - si.freehigh;
98         mem *= si.mem_unit;
99
100         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
101         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
102         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
103         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
104                 (kfd_mem_limit.max_system_mem_limit >> 20),
105                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
106 }
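/* Worked example (illustrative): with 32 GiB of usable low memory,
 * max_system_mem_limit = 32 GiB - (32 GiB >> 4) = 30 GiB (15/16th) and
 * max_ttm_mem_limit = (32 GiB >> 1) - (32 GiB >> 3) = 12 GiB (3/8th).
 */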
107
108 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
109 {
110         kfd_mem_limit.system_mem_used += size;
111 }
112
113 /* Estimate page table size needed to represent a given memory size
114  *
115  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
116  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
117  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
118  * for 2MB pages for TLB efficiency. However, small allocations and
119  * fragmented system memory still need some 4KB pages. We choose a
120  * compromise that should work in most cases without reserving too
121  * much memory for page tables unnecessarily (factor 16K, >> 14).
122  */
123 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
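/* Worked example (illustrative): for 64 GiB of managed memory,
 * ESTIMATE_PT_SIZE(64 GiB) = 64 GiB >> 14 = 4 MiB of page-table reserve,
 * i.e. one 8-byte PTE per 128 KiB of memory, in between the 4 KiB-page
 * (one PTE per 4 KiB) and 2 MiB-page (one PTE per 2 MiB) extremes above.
 */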
124
125 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
126 {
127         size >>= PAGE_SHIFT;
128         size *= sizeof(dma_addr_t) + sizeof(void *);
129
130         return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
131                 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
132                 PAGE_ALIGN(size);
133 }
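/* Worked example (illustrative, 64-bit kernel, 4 KiB pages): a 2 MiB
 * allocation spans 512 pages; each page is tracked by a dma_addr_t plus
 * a page pointer (16 bytes), giving 8 KiB of page-aligned tracking
 * arrays on top of the rounded-up amdgpu_bo and ttm_tt struct sizes.
 */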
134
135 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
136                 uint64_t size, u32 domain, bool sg)
137 {
138         uint64_t reserved_for_pt =
139                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
140         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
141         int ret = 0;
142
143         acc_size = amdgpu_amdkfd_acc_size(size);
144
145         vram_needed = 0;
146         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
147                 /* TTM GTT memory */
148                 system_mem_needed = acc_size + size;
149                 ttm_mem_needed = acc_size + size;
150         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
151                 /* Userptr */
152                 system_mem_needed = acc_size + size;
153                 ttm_mem_needed = acc_size;
154         } else {
155                 /* VRAM and SG */
156                 system_mem_needed = acc_size;
157                 ttm_mem_needed = acc_size;
158                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
159                         vram_needed = size;
160         }
161
162         spin_lock(&kfd_mem_limit.mem_limit_lock);
163
164         if (kfd_mem_limit.system_mem_used + system_mem_needed >
165             kfd_mem_limit.max_system_mem_limit)
166                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
167
168         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
169              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
170             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
171              kfd_mem_limit.max_ttm_mem_limit) ||
172             (adev->kfd.vram_used + vram_needed >
173              adev->gmc.real_vram_size - reserved_for_pt)) {
174                 ret = -ENOMEM;
175         } else {
176                 kfd_mem_limit.system_mem_used += system_mem_needed;
177                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
178                 adev->kfd.vram_used += vram_needed;
179         }
180
181         spin_unlock(&kfd_mem_limit.mem_limit_lock);
182         return ret;
183 }
184
185 static void unreserve_mem_limit(struct amdgpu_device *adev,
186                 uint64_t size, u32 domain, bool sg)
187 {
188         size_t acc_size;
189
190         acc_size = amdgpu_amdkfd_acc_size(size);
191
192         spin_lock(&kfd_mem_limit.mem_limit_lock);
193         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
194                 kfd_mem_limit.system_mem_used -= (acc_size + size);
195                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
196         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
197                 kfd_mem_limit.system_mem_used -= (acc_size + size);
198                 kfd_mem_limit.ttm_mem_used -= acc_size;
199         } else {
200                 kfd_mem_limit.system_mem_used -= acc_size;
201                 kfd_mem_limit.ttm_mem_used -= acc_size;
202                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
203                         adev->kfd.vram_used -= size;
204                         WARN_ONCE(adev->kfd.vram_used < 0,
205                                   "kfd VRAM memory accounting unbalanced");
206                 }
207         }
208         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
209                   "kfd system memory accounting unbalanced");
210         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
211                   "kfd TTM memory accounting unbalanced");
212
213         spin_unlock(&kfd_mem_limit.mem_limit_lock);
214 }
215
216 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
217 {
218         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
219         u32 domain = bo->preferred_domains;
220         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
221
222         if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
223                 domain = AMDGPU_GEM_DOMAIN_CPU;
224                 sg = false;
225         }
226
227         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
228 }
229
230
231 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
232  *  reservation object.
233  *
234  * @bo: [IN] Remove eviction fence(s) from this BO
235  * @ef: [IN] This eviction fence is removed if it
236  *  is present in the shared list.
237  *
238  * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv->lock held.
239  */
240 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
241                                         struct amdgpu_amdkfd_fence *ef)
242 {
243         struct dma_resv *resv = bo->tbo.base.resv;
244         struct dma_resv_list *old, *new;
245         unsigned int i, j, k;
246
247         if (!ef)
248                 return -EINVAL;
249
250         old = dma_resv_get_list(resv);
251         if (!old)
252                 return 0;
253
254         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
255         if (!new)
256                 return -ENOMEM;
257
258         /* Go through all the shared fences in the reservation object and sort
259          * the interesting ones to the end of the list.
260          */
261         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
262                 struct dma_fence *f;
263
264                 f = rcu_dereference_protected(old->shared[i],
265                                               dma_resv_held(resv));
266
267                 if (f->context == ef->base.context)
268                         RCU_INIT_POINTER(new->shared[--j], f);
269                 else
270                         RCU_INIT_POINTER(new->shared[k++], f);
271         }
272         new->shared_max = old->shared_max;
273         new->shared_count = k;
274
275         /* Install the new fence list, seqcount provides the barriers */
276         write_seqcount_begin(&resv->seq);
277         RCU_INIT_POINTER(resv->fence, new);
278         write_seqcount_end(&resv->seq);
279
280         /* Drop the references to the removed fences */
281         for (i = j, k = 0; i < old->shared_count; ++i) {
282                 struct dma_fence *f;
283
284                 f = rcu_dereference_protected(new->shared[i],
285                                               dma_resv_held(resv));
286                 dma_fence_put(f);
287         }
288         kfree_rcu(old, rcu);
289
290         return 0;
291 }
292
293 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
294 {
295         struct amdgpu_bo *root = bo;
296         struct amdgpu_vm_bo_base *vm_bo;
297         struct amdgpu_vm *vm;
298         struct amdkfd_process_info *info;
299         struct amdgpu_amdkfd_fence *ef;
300         int ret;
301
302         /* We can always get vm_bo from the root PD BO. */
303         while (root->parent)
304                 root = root->parent;
305
306         vm_bo = root->vm_bo;
307         if (!vm_bo)
308                 return 0;
309
310         vm = vm_bo->vm;
311         if (!vm)
312                 return 0;
313
314         info = vm->process_info;
315         if (!info || !info->eviction_fence)
316                 return 0;
317
318         ef = container_of(dma_fence_get(&info->eviction_fence->base),
319                         struct amdgpu_amdkfd_fence, base);
320
321         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
322         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
323         dma_resv_unlock(bo->tbo.base.resv);
324
325         dma_fence_put(&ef->base);
326         return ret;
327 }
328
329 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
330                                      bool wait)
331 {
332         struct ttm_operation_ctx ctx = { false, false };
333         int ret;
334
335         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
336                  "Called with userptr BO"))
337                 return -EINVAL;
338
339         amdgpu_bo_placement_from_domain(bo, domain);
340
341         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
342         if (ret)
343                 goto validate_fail;
344         if (wait)
345                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
346
347 validate_fail:
348         return ret;
349 }
350
351 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
352 {
353         struct amdgpu_vm_parser *p = param;
354
355         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
356 }
357
358 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
359  *
360  * Page directories are not updated here because huge page handling
361  * during page table updates can invalidate page directory entries
362  * again. Page directories are only updated after updating page
363  * tables.
364  */
365 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
366 {
367         struct amdgpu_bo *pd = vm->root.base.bo;
368         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
369         struct amdgpu_vm_parser param;
370         int ret;
371
372         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
373         param.wait = false;
374
375         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
376                                         &param);
377         if (ret) {
378                 pr_err("failed to validate PT BOs\n");
379                 return ret;
380         }
381
382         ret = amdgpu_amdkfd_validate(&param, pd);
383         if (ret) {
384                 pr_err("failed to validate PD\n");
385                 return ret;
386         }
387
388         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
389
390         if (vm->use_cpu_for_update) {
391                 ret = amdgpu_bo_kmap(pd, NULL);
392                 if (ret) {
393                         pr_err("failed to kmap PD, ret=%d\n", ret);
394                         return ret;
395                 }
396         }
397
398         return 0;
399 }
400
401 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
402 {
403         struct amdgpu_bo *pd = vm->root.base.bo;
404         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
405         int ret;
406
407         ret = amdgpu_vm_update_pdes(adev, vm, false);
408         if (ret)
409                 return ret;
410
411         return amdgpu_sync_fence(sync, vm->last_update);
412 }
413
414 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
415 {
416         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
417         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
418         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
419         uint32_t mapping_flags;
420         uint64_t pte_flags;
421         bool snoop = false;
422
423         mapping_flags = AMDGPU_VM_PAGE_READABLE;
424         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
425                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
426         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
427                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
428
429         switch (adev->asic_type) {
430         case CHIP_ARCTURUS:
431                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
432                         if (bo_adev == adev)
433                                 mapping_flags |= coherent ?
434                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
435                         else
436                                 mapping_flags |= coherent ?
437                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
438                 } else {
439                         mapping_flags |= coherent ?
440                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
441                 }
442                 break;
443         case CHIP_ALDEBARAN:
444                 if (coherent && uncached) {
445                         if (adev->gmc.xgmi.connected_to_cpu ||
446                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
447                                 snoop = true;
448                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
449                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
450                         if (bo_adev == adev) {
451                                 mapping_flags |= coherent ?
452                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
453                                 if (adev->gmc.xgmi.connected_to_cpu)
454                                         snoop = true;
455                         } else {
456                                 mapping_flags |= coherent ?
457                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
458                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
459                                         snoop = true;
460                         }
461                 } else {
462                         snoop = true;
463                         mapping_flags |= coherent ?
464                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
465                 }
466                 break;
467         default:
468                 mapping_flags |= coherent ?
469                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
470         }
471
472         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
473         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
474
475         return pte_flags;
476 }
477
478 /* add_bo_to_vm - Add a BO to a VM
479  *
480  * Everything that needs to be done only once when a BO is first added
481  * to a VM. It can later be mapped and unmapped many times without
482  * repeating these steps.
483  *
484  * 1. Allocate and initialize BO VA entry data structure
485  * 2. Add BO to the VM
486  * 3. Determine ASIC-specific PTE flags
487  * 4. Alloc page tables and directories if needed
488  * 4a.  Validate new page tables and directories
489  */
490 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
491                 struct amdgpu_vm *vm, bool is_aql,
492                 struct kfd_bo_va_list **p_bo_va_entry)
493 {
494         int ret;
495         struct kfd_bo_va_list *bo_va_entry;
496         struct amdgpu_bo *bo = mem->bo;
497         uint64_t va = mem->va;
498         struct list_head *list_bo_va = &mem->bo_va_list;
499         unsigned long bo_size = bo->tbo.base.size;
500
501         if (!va) {
502                 pr_err("Invalid VA when adding BO to VM\n");
503                 return -EINVAL;
504         }
505
506         if (is_aql)
507                 va += bo_size;
508
509         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
510         if (!bo_va_entry)
511                 return -ENOMEM;
512
513         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
514                         va + bo_size, vm);
515
516         /* Add BO to VM internal data structures */
517         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
518         if (!bo_va_entry->bo_va) {
519                 ret = -EINVAL;
520                 pr_err("Failed to add BO to VM. ret == %d\n",
521                                 ret);
522                 goto err_vmadd;
523         }
524
525         bo_va_entry->va = va;
526         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
527         bo_va_entry->kgd_dev = (void *)adev;
528         list_add(&bo_va_entry->bo_list, list_bo_va);
529
530         if (p_bo_va_entry)
531                 *p_bo_va_entry = bo_va_entry;
532
533         /* Allocate and validate page tables if needed */
534         ret = vm_validate_pt_pd_bos(vm);
535         if (ret) {
536                 pr_err("validate_pt_pd_bos() failed\n");
537                 goto err_alloc_pts;
538         }
539
540         return 0;
541
542 err_alloc_pts:
543         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
544         list_del(&bo_va_entry->bo_list);
545 err_vmadd:
546         kfree(bo_va_entry);
547         return ret;
548 }
549
550 static void remove_bo_from_vm(struct amdgpu_device *adev,
551                 struct kfd_bo_va_list *entry, unsigned long size)
552 {
553         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
554                         entry->va,
555                         entry->va + size, entry);
556         amdgpu_vm_bo_rmv(adev, entry->bo_va);
557         list_del(&entry->bo_list);
558         kfree(entry);
559 }
560
561 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
562                                 struct amdkfd_process_info *process_info,
563                                 bool userptr)
564 {
565         struct ttm_validate_buffer *entry = &mem->validate_list;
566         struct amdgpu_bo *bo = mem->bo;
567
568         INIT_LIST_HEAD(&entry->head);
569         entry->num_shared = 1;
570         entry->bo = &bo->tbo;
571         mutex_lock(&process_info->lock);
572         if (userptr)
573                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
574         else
575                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
576         mutex_unlock(&process_info->lock);
577 }
578
579 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
580                 struct amdkfd_process_info *process_info)
581 {
582         struct ttm_validate_buffer *bo_list_entry;
583
584         bo_list_entry = &mem->validate_list;
585         mutex_lock(&process_info->lock);
586         list_del(&bo_list_entry->head);
587         mutex_unlock(&process_info->lock);
588 }
589
590 /* Initializes user pages. It registers the MMU notifier and validates
591  * the userptr BO in the GTT domain.
592  *
593  * The BO must already be on the userptr_valid_list. Otherwise an
594  * eviction and restore may happen that leaves the new BO unmapped
595  * with the user mode queues running.
596  *
597  * Takes the process_info->lock to protect against concurrent restore
598  * workers.
599  *
600  * Returns 0 for success, negative errno for errors.
601  */
602 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
603 {
604         struct amdkfd_process_info *process_info = mem->process_info;
605         struct amdgpu_bo *bo = mem->bo;
606         struct ttm_operation_ctx ctx = { true, false };
607         int ret = 0;
608
609         mutex_lock(&process_info->lock);
610
611         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
612         if (ret) {
613                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
614                 goto out;
615         }
616
617         ret = amdgpu_mn_register(bo, user_addr);
618         if (ret) {
619                 pr_err("%s: Failed to register MMU notifier: %d\n",
620                        __func__, ret);
621                 goto out;
622         }
623
624         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
625         if (ret) {
626                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
627                 goto unregister_out;
628         }
629
630         ret = amdgpu_bo_reserve(bo, true);
631         if (ret) {
632                 pr_err("%s: Failed to reserve BO\n", __func__);
633                 goto release_out;
634         }
635         amdgpu_bo_placement_from_domain(bo, mem->domain);
636         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
637         if (ret)
638                 pr_err("%s: failed to validate BO\n", __func__);
639         amdgpu_bo_unreserve(bo);
640
641 release_out:
642         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
643 unregister_out:
644         if (ret)
645                 amdgpu_mn_unregister(bo);
646 out:
647         mutex_unlock(&process_info->lock);
648         return ret;
649 }
650
651 /* Reserving a BO and its page table BOs must happen atomically to
652  * avoid deadlocks. Some operations update multiple VMs at once. Track
653  * all the reservation info in a context structure. Optionally a sync
654  * object can track VM updates.
655  */
656 struct bo_vm_reservation_context {
657         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
658         unsigned int n_vms;                 /* Number of VMs reserved       */
659         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
660         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
661         struct list_head list, duplicates;  /* BO lists                     */
662         struct amdgpu_sync *sync;           /* Pointer to sync object       */
663         bool reserved;                      /* Whether BOs are reserved     */
664 };
665
666 enum bo_vm_match {
667         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
668         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
669         BO_VM_ALL,              /* Match all VMs a BO was added to    */
670 };
671
672 /**
673  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
674  * @mem: KFD BO structure.
675  * @vm: the VM to reserve.
676  * @ctx: the struct that will be used in unreserve_bo_and_vms().
677  */
678 static int reserve_bo_and_vm(struct kgd_mem *mem,
679                               struct amdgpu_vm *vm,
680                               struct bo_vm_reservation_context *ctx)
681 {
682         struct amdgpu_bo *bo = mem->bo;
683         int ret;
684
685         WARN_ON(!vm);
686
687         ctx->reserved = false;
688         ctx->n_vms = 1;
689         ctx->sync = &mem->sync;
690
691         INIT_LIST_HEAD(&ctx->list);
692         INIT_LIST_HEAD(&ctx->duplicates);
693
694         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
695         if (!ctx->vm_pd)
696                 return -ENOMEM;
697
698         ctx->kfd_bo.priority = 0;
699         ctx->kfd_bo.tv.bo = &bo->tbo;
700         ctx->kfd_bo.tv.num_shared = 1;
701         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
702
703         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
704
705         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
706                                      false, &ctx->duplicates);
707         if (ret) {
708                 pr_err("Failed to reserve buffers in ttm.\n");
709                 kfree(ctx->vm_pd);
710                 ctx->vm_pd = NULL;
711                 return ret;
712         }
713
714         ctx->reserved = true;
715         return 0;
716 }
717
718 /**
719  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
720  * @mem: KFD BO structure.
721  * @vm: the VM to reserve. If NULL, all VMs associated with the BO
722  * are used. Otherwise, only the given VM is reserved.
723  * @map_type: the mapping status that will be used to filter the VMs.
724  * @ctx: the struct that will be used in unreserve_bo_and_vms().
725  *
726  * Returns 0 for success, negative for failure.
727  */
728 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
729                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
730                                 struct bo_vm_reservation_context *ctx)
731 {
732         struct amdgpu_bo *bo = mem->bo;
733         struct kfd_bo_va_list *entry;
734         unsigned int i;
735         int ret;
736
737         ctx->reserved = false;
738         ctx->n_vms = 0;
739         ctx->vm_pd = NULL;
740         ctx->sync = &mem->sync;
741
742         INIT_LIST_HEAD(&ctx->list);
743         INIT_LIST_HEAD(&ctx->duplicates);
744
745         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
746                 if ((vm && vm != entry->bo_va->base.vm) ||
747                         (entry->is_mapped != map_type
748                         && map_type != BO_VM_ALL))
749                         continue;
750
751                 ctx->n_vms++;
752         }
753
754         if (ctx->n_vms != 0) {
755                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
756                                      GFP_KERNEL);
757                 if (!ctx->vm_pd)
758                         return -ENOMEM;
759         }
760
761         ctx->kfd_bo.priority = 0;
762         ctx->kfd_bo.tv.bo = &bo->tbo;
763         ctx->kfd_bo.tv.num_shared = 1;
764         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
765
766         i = 0;
767         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
768                 if ((vm && vm != entry->bo_va->base.vm) ||
769                         (entry->is_mapped != map_type
770                         && map_type != BO_VM_ALL))
771                         continue;
772
773                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
774                                 &ctx->vm_pd[i]);
775                 i++;
776         }
777
778         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
779                                      false, &ctx->duplicates);
780         if (ret) {
781                 pr_err("Failed to reserve buffers in ttm.\n");
782                 kfree(ctx->vm_pd);
783                 ctx->vm_pd = NULL;
784                 return ret;
785         }
786
787         ctx->reserved = true;
788         return 0;
789 }
790
791 /**
792  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
793  * @ctx: Reservation context to unreserve
794  * @wait: Optionally wait for a sync object representing pending VM updates
795  * @intr: Whether the wait is interruptible
796  *
797  * Also frees any resources allocated in
798  * reserve_bo_and_(cond_)vm(s). Returns the status from
799  * amdgpu_sync_wait.
800  */
801 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
802                                  bool wait, bool intr)
803 {
804         int ret = 0;
805
806         if (wait)
807                 ret = amdgpu_sync_wait(ctx->sync, intr);
808
809         if (ctx->reserved)
810                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
811         kfree(ctx->vm_pd);
812
813         ctx->sync = NULL;
814
815         ctx->reserved = false;
816         ctx->vm_pd = NULL;
817
818         return ret;
819 }
820
821 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
822                                 struct kfd_bo_va_list *entry,
823                                 struct amdgpu_sync *sync)
824 {
825         struct amdgpu_bo_va *bo_va = entry->bo_va;
826         struct amdgpu_vm *vm = bo_va->base.vm;
827
828         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
829
830         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
831
832         amdgpu_sync_fence(sync, bo_va->last_pt_update);
833
834         return 0;
835 }
836
837 static int update_gpuvm_pte(struct amdgpu_device *adev,
838                 struct kfd_bo_va_list *entry,
839                 struct amdgpu_sync *sync)
840 {
841         int ret;
842         struct amdgpu_bo_va *bo_va = entry->bo_va;
843
844         /* Update the page tables  */
845         ret = amdgpu_vm_bo_update(adev, bo_va, false);
846         if (ret) {
847                 pr_err("amdgpu_vm_bo_update failed\n");
848                 return ret;
849         }
850
851         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
852 }
853
854 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
855                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
856                 bool no_update_pte)
857 {
858         int ret;
859
860         /* Set virtual address for the allocation */
861         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
862                                amdgpu_bo_size(entry->bo_va->base.bo),
863                                entry->pte_flags);
864         if (ret) {
865                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
866                                 entry->va, ret);
867                 return ret;
868         }
869
870         if (no_update_pte)
871                 return 0;
872
873         ret = update_gpuvm_pte(adev, entry, sync);
874         if (ret) {
875                 pr_err("update_gpuvm_pte() failed\n");
876                 goto update_gpuvm_pte_failed;
877         }
878
879         return 0;
880
881 update_gpuvm_pte_failed:
882         unmap_bo_from_gpuvm(adev, entry, sync);
883         return ret;
884 }
885
886 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
887 {
888         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
889
890         if (!sg)
891                 return NULL;
892         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
893                 kfree(sg);
894                 return NULL;
895         }
896         sg->sgl->dma_address = addr;
897         sg->sgl->length = size;
898 #ifdef CONFIG_NEED_SG_DMA_LENGTH
899         sg->sgl->dma_length = size;
900 #endif
901         return sg;
902 }
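/* Usage sketch (illustrative): amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
 * below builds such a one-entry table for DOORBELL/MMIO_REMAP allocations,
 * e.g. sg = create_doorbell_sg(*offset, size), where *offset is the bus
 * address handed in by the caller. The single sgl entry carries that
 * address directly as its dma_address, so no extra DMA mapping step is
 * performed here before TTM consumes the table.
 */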
903
904 static int process_validate_vms(struct amdkfd_process_info *process_info)
905 {
906         struct amdgpu_vm *peer_vm;
907         int ret;
908
909         list_for_each_entry(peer_vm, &process_info->vm_list_head,
910                             vm_list_node) {
911                 ret = vm_validate_pt_pd_bos(peer_vm);
912                 if (ret)
913                         return ret;
914         }
915
916         return 0;
917 }
918
919 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
920                                  struct amdgpu_sync *sync)
921 {
922         struct amdgpu_vm *peer_vm;
923         int ret;
924
925         list_for_each_entry(peer_vm, &process_info->vm_list_head,
926                             vm_list_node) {
927                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
928
929                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
930                                        AMDGPU_SYNC_NE_OWNER,
931                                        AMDGPU_FENCE_OWNER_KFD);
932                 if (ret)
933                         return ret;
934         }
935
936         return 0;
937 }
938
939 static int process_update_pds(struct amdkfd_process_info *process_info,
940                               struct amdgpu_sync *sync)
941 {
942         struct amdgpu_vm *peer_vm;
943         int ret;
944
945         list_for_each_entry(peer_vm, &process_info->vm_list_head,
946                             vm_list_node) {
947                 ret = vm_update_pds(peer_vm, sync);
948                 if (ret)
949                         return ret;
950         }
951
952         return 0;
953 }
954
955 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
956                        struct dma_fence **ef)
957 {
958         struct amdkfd_process_info *info = NULL;
959         int ret;
960
961         if (!*process_info) {
962                 info = kzalloc(sizeof(*info), GFP_KERNEL);
963                 if (!info)
964                         return -ENOMEM;
965
966                 mutex_init(&info->lock);
967                 INIT_LIST_HEAD(&info->vm_list_head);
968                 INIT_LIST_HEAD(&info->kfd_bo_list);
969                 INIT_LIST_HEAD(&info->userptr_valid_list);
970                 INIT_LIST_HEAD(&info->userptr_inval_list);
971
972                 info->eviction_fence =
973                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
974                                                    current->mm,
975                                                    NULL);
976                 if (!info->eviction_fence) {
977                         pr_err("Failed to create eviction fence\n");
978                         ret = -ENOMEM;
979                         goto create_evict_fence_fail;
980                 }
981
982                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
983                 atomic_set(&info->evicted_bos, 0);
984                 INIT_DELAYED_WORK(&info->restore_userptr_work,
985                                   amdgpu_amdkfd_restore_userptr_worker);
986
987                 *process_info = info;
988                 *ef = dma_fence_get(&info->eviction_fence->base);
989         }
990
991         vm->process_info = *process_info;
992
993         /* Validate page directory and attach eviction fence */
994         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
995         if (ret)
996                 goto reserve_pd_fail;
997         ret = vm_validate_pt_pd_bos(vm);
998         if (ret) {
999                 pr_err("validate_pt_pd_bos() failed\n");
1000                 goto validate_pd_fail;
1001         }
1002         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
1003                                   AMDGPU_FENCE_OWNER_KFD, false);
1004         if (ret)
1005                 goto wait_pd_fail;
1006         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
1007         if (ret)
1008                 goto reserve_shared_fail;
1009         amdgpu_bo_fence(vm->root.base.bo,
1010                         &vm->process_info->eviction_fence->base, true);
1011         amdgpu_bo_unreserve(vm->root.base.bo);
1012
1013         /* Update process info */
1014         mutex_lock(&vm->process_info->lock);
1015         list_add_tail(&vm->vm_list_node,
1016                         &(vm->process_info->vm_list_head));
1017         vm->process_info->n_vms++;
1018         mutex_unlock(&vm->process_info->lock);
1019
1020         return 0;
1021
1022 reserve_shared_fail:
1023 wait_pd_fail:
1024 validate_pd_fail:
1025         amdgpu_bo_unreserve(vm->root.base.bo);
1026 reserve_pd_fail:
1027         vm->process_info = NULL;
1028         if (info) {
1029                 /* Two fence references: one in info and one in *ef */
1030                 dma_fence_put(&info->eviction_fence->base);
1031                 dma_fence_put(*ef);
1032                 *ef = NULL;
1033                 *process_info = NULL;
1034                 put_pid(info->pid);
1035 create_evict_fence_fail:
1036                 mutex_destroy(&info->lock);
1037                 kfree(info);
1038         }
1039         return ret;
1040 }
1041
1042 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1043                                            struct file *filp, u32 pasid,
1044                                            void **process_info,
1045                                            struct dma_fence **ef)
1046 {
1047         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1048         struct amdgpu_fpriv *drv_priv;
1049         struct amdgpu_vm *avm;
1050         int ret;
1051
1052         ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1053         if (ret)
1054                 return ret;
1055         avm = &drv_priv->vm;
1056
1057         /* Already a compute VM? */
1058         if (avm->process_info)
1059                 return -EINVAL;
1060
1061         /* Convert VM into a compute VM */
1062         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1063         if (ret)
1064                 return ret;
1065
1066         /* Initialize KFD part of the VM and process info */
1067         ret = init_kfd_vm(avm, process_info, ef);
1068         if (ret)
1069                 return ret;
1070
1071         amdgpu_vm_set_task_info(avm);
1072
1073         return 0;
1074 }
1075
1076 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1077                                     struct amdgpu_vm *vm)
1078 {
1079         struct amdkfd_process_info *process_info = vm->process_info;
1080         struct amdgpu_bo *pd = vm->root.base.bo;
1081
1082         if (!process_info)
1083                 return;
1084
1085         /* Release eviction fence from PD */
1086         amdgpu_bo_reserve(pd, false);
1087         amdgpu_bo_fence(pd, NULL, false);
1088         amdgpu_bo_unreserve(pd);
1089
1090         /* Update process info */
1091         mutex_lock(&process_info->lock);
1092         process_info->n_vms--;
1093         list_del(&vm->vm_list_node);
1094         mutex_unlock(&process_info->lock);
1095
1096         vm->process_info = NULL;
1097
1098         /* Release per-process resources when last compute VM is destroyed */
1099         if (!process_info->n_vms) {
1100                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1101                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1102                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1103
1104                 dma_fence_put(&process_info->eviction_fence->base);
1105                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1106                 put_pid(process_info->pid);
1107                 mutex_destroy(&process_info->lock);
1108                 kfree(process_info);
1109         }
1110 }
1111
1112 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1113 {
1114         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1115         struct amdgpu_vm *avm;
1116
1117         if (WARN_ON(!kgd || !drm_priv))
1118                 return;
1119
1120         avm = drm_priv_to_vm(drm_priv);
1121
1122         pr_debug("Releasing process vm %p\n", avm);
1123
1124         /* The original pasid of the amdgpu vm has already been
1125          * released when the vm was converted to a compute vm.
1126          * The current pasid is managed by kfd and will be
1127          * released on kfd process destroy. Set the amdgpu pasid
1128          * to 0 to avoid a duplicate release.
1129          */
1130         amdgpu_vm_release_compute(adev, avm);
1131 }
1132
1133 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1134 {
1135         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1136         struct amdgpu_bo *pd = avm->root.base.bo;
1137         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1138
1139         if (adev->asic_type < CHIP_VEGA10)
1140                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1141         return avm->pd_phys_addr;
1142 }
1143
1144 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1145                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1146                 void *drm_priv, struct kgd_mem **mem,
1147                 uint64_t *offset, uint32_t flags)
1148 {
1149         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1150         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1151         enum ttm_bo_type bo_type = ttm_bo_type_device;
1152         struct sg_table *sg = NULL;
1153         uint64_t user_addr = 0;
1154         struct amdgpu_bo *bo;
1155         struct drm_gem_object *gobj;
1156         u32 domain, alloc_domain;
1157         u64 alloc_flags;
1158         int ret;
1159
1160         /*
1161          * Check on which domain to allocate BO
1162          */
1163         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1164                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1165                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1166                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1167                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1168                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1169         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1170                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1171                 alloc_flags = 0;
1172         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1173                 domain = AMDGPU_GEM_DOMAIN_GTT;
1174                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1175                 alloc_flags = 0;
1176                 if (!offset || !*offset)
1177                         return -EINVAL;
1178                 user_addr = untagged_addr(*offset);
1179         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1180                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1181                 domain = AMDGPU_GEM_DOMAIN_GTT;
1182                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1183                 bo_type = ttm_bo_type_sg;
1184                 alloc_flags = 0;
1185                 if (size > UINT_MAX)
1186                         return -EINVAL;
1187                 sg = create_doorbell_sg(*offset, size);
1188                 if (!sg)
1189                         return -ENOMEM;
1190         } else {
1191                 return -EINVAL;
1192         }
1193
1194         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1195         if (!*mem) {
1196                 ret = -ENOMEM;
1197                 goto err;
1198         }
1199         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1200         mutex_init(&(*mem)->lock);
1201         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1202
1203         /* Workaround for AQL queue wraparound bug. Map the same
1204          * memory twice. That means we only actually allocate half
1205          * the memory.
1206          */
1207         if ((*mem)->aql_queue)
1208                 size = size >> 1;
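        /* Illustrative example: a 4 MiB AQL request results in a single
         * 2 MiB BO; add_bo_to_vm() is later called twice (is_aql false,
         * then true), so the same BO is mapped at both va and va + 2 MiB.
         */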
1209
1210         (*mem)->alloc_flags = flags;
1211
1212         amdgpu_sync_create(&(*mem)->sync);
1213
1214         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1215         if (ret) {
1216                 pr_debug("Insufficient memory\n");
1217                 goto err_reserve_limit;
1218         }
1219
1220         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1221                         va, size, domain_string(alloc_domain));
1222
1223         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1224                                        bo_type, NULL, &gobj);
1225         if (ret) {
1226                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1227                          domain_string(alloc_domain), ret);
1228                 goto err_bo_create;
1229         }
1230         ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1231         if (ret) {
1232                 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1233                 goto err_node_allow;
1234         }
1235         bo = gem_to_amdgpu_bo(gobj);
1236         if (bo_type == ttm_bo_type_sg) {
1237                 bo->tbo.sg = sg;
1238                 bo->tbo.ttm->sg = sg;
1239         }
1240         bo->kfd_bo = *mem;
1241         (*mem)->bo = bo;
1242         if (user_addr)
1243                 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1244
1245         (*mem)->va = va;
1246         (*mem)->domain = domain;
1247         (*mem)->mapped_to_gpu_memory = 0;
1248         (*mem)->process_info = avm->process_info;
1249         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1250
1251         if (user_addr) {
1252                 ret = init_user_pages(*mem, user_addr);
1253                 if (ret)
1254                         goto allocate_init_user_pages_failed;
1255         }
1256
1257         if (offset)
1258                 *offset = amdgpu_bo_mmap_offset(bo);
1259
1260         return 0;
1261
1262 allocate_init_user_pages_failed:
1263         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1264         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1265 err_node_allow:
1266         amdgpu_bo_unref(&bo);
1267         /* Don't unreserve system mem limit twice */
1268         goto err_reserve_limit;
1269 err_bo_create:
1270         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1271 err_reserve_limit:
1272         mutex_destroy(&(*mem)->lock);
1273         kfree(*mem);
1274 err:
1275         if (sg) {
1276                 sg_free_table(sg);
1277                 kfree(sg);
1278         }
1279         return ret;
1280 }
1281
1282 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1283                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1284                 uint64_t *size)
1285 {
1286         struct amdkfd_process_info *process_info = mem->process_info;
1287         unsigned long bo_size = mem->bo->tbo.base.size;
1288         struct kfd_bo_va_list *entry, *tmp;
1289         struct bo_vm_reservation_context ctx;
1290         struct ttm_validate_buffer *bo_list_entry;
1291         unsigned int mapped_to_gpu_memory;
1292         int ret;
1293         bool is_imported = false;
1294
1295         mutex_lock(&mem->lock);
1296         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1297         is_imported = mem->is_imported;
1298         mutex_unlock(&mem->lock);
1299         /* lock is not needed after this, since mem is unused and will
1300          * be freed anyway
1301          */
1302
1303         if (mapped_to_gpu_memory > 0) {
1304                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1305                                 mem->va, bo_size);
1306                 return -EBUSY;
1307         }
1308
1309         /* Make sure restore workers don't access the BO any more */
1310         bo_list_entry = &mem->validate_list;
1311         mutex_lock(&process_info->lock);
1312         list_del(&bo_list_entry->head);
1313         mutex_unlock(&process_info->lock);
1314
1315         /* No more MMU notifiers */
1316         amdgpu_mn_unregister(mem->bo);
1317
1318         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1319         if (unlikely(ret))
1320                 return ret;
1321
1322         /* The eviction fence should be removed by the last unmap.
1323          * TODO: Log an error condition if the bo still has the eviction fence
1324          * attached
1325          */
1326         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1327                                         process_info->eviction_fence);
1328         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1329                 mem->va + bo_size * (1 + mem->aql_queue));
1330
1331         /* Remove from VM internal data structures */
1332         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1333                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1334                                 entry, bo_size);
1335
1336         ret = unreserve_bo_and_vms(&ctx, false, false);
1337
1338         /* Free the sync object */
1339         amdgpu_sync_free(&mem->sync);
1340
1341         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1342          * remap BO. We need to free it.
1343          */
1344         if (mem->bo->tbo.sg) {
1345                 sg_free_table(mem->bo->tbo.sg);
1346                 kfree(mem->bo->tbo.sg);
1347         }
1348
1349         /* Update the size of the BO being freed if it was allocated from
1350          * VRAM and is not imported.
1351          */
1352         if (size) {
1353                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1354                     (!is_imported))
1355                         *size = bo_size;
1356                 else
1357                         *size = 0;
1358         }
1359
1360         /* Free the BO */
1361         drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1362         drm_gem_object_put(&mem->bo->tbo.base);
1363         mutex_destroy(&mem->lock);
1364         kfree(mem);
1365
1366         return ret;
1367 }
1368
1369 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1370                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1371 {
1372         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1373         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1374         int ret;
1375         struct amdgpu_bo *bo;
1376         uint32_t domain;
1377         struct kfd_bo_va_list *entry;
1378         struct bo_vm_reservation_context ctx;
1379         struct kfd_bo_va_list *bo_va_entry = NULL;
1380         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1381         unsigned long bo_size;
1382         bool is_invalid_userptr = false;
1383
1384         bo = mem->bo;
1385         if (!bo) {
1386                 pr_err("Invalid BO when mapping memory to GPU\n");
1387                 return -EINVAL;
1388         }
1389
1390         /* Make sure restore is not running concurrently. Since we
1391          * don't map invalid userptr BOs, we rely on the next restore
1392          * worker to do the mapping
1393          */
1394         mutex_lock(&mem->process_info->lock);
1395
1396         /* Take the mmap lock. If we find an invalid userptr BO, we can
1397          * be sure that the MMU notifier is no longer running
1398          * concurrently and the queues are actually stopped
1399          */
1400         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1401                 mmap_write_lock(current->mm);
1402                 is_invalid_userptr = atomic_read(&mem->invalid);
1403                 mmap_write_unlock(current->mm);
1404         }
1405
1406         mutex_lock(&mem->lock);
1407
1408         domain = mem->domain;
1409         bo_size = bo->tbo.base.size;
1410
1411         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1412                         mem->va,
1413                         mem->va + bo_size * (1 + mem->aql_queue),
1414                         avm, domain_string(domain));
1415
1416         ret = reserve_bo_and_vm(mem, avm, &ctx);
1417         if (unlikely(ret))
1418                 goto out;
1419
1420         /* Userptr can be marked as "not invalid", but not actually be
1421          * validated yet (still in the system domain). In that case
1422          * the queues are still stopped and we can leave mapping for
1423          * the next restore worker
1424          */
1425         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1426             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1427                 is_invalid_userptr = true;
1428
1429         if (check_if_add_bo_to_vm(avm, mem)) {
1430                 ret = add_bo_to_vm(adev, mem, avm, false,
1431                                 &bo_va_entry);
1432                 if (ret)
1433                         goto add_bo_to_vm_failed;
1434                 if (mem->aql_queue) {
1435                         ret = add_bo_to_vm(adev, mem, avm,
1436                                         true, &bo_va_entry_aql);
1437                         if (ret)
1438                                 goto add_bo_to_vm_failed_aql;
1439                 }
1440         } else {
1441                 ret = vm_validate_pt_pd_bos(avm);
1442                 if (unlikely(ret))
1443                         goto add_bo_to_vm_failed;
1444         }
1445
1446         if (mem->mapped_to_gpu_memory == 0 &&
1447             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1448                 /* Validate BO only once. The eviction fence gets added to BO
1449                  * the first time it is mapped. Validate will wait for all
1450                  * background evictions to complete.
1451                  */
1452                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1453                 if (ret) {
1454                         pr_debug("Validate failed\n");
1455                         goto map_bo_to_gpuvm_failed;
1456                 }
1457         }
1458
1459         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1460                 if (entry->bo_va->base.vm == avm && !entry->is_mapped) {
1461                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1462                                         entry->va, entry->va + bo_size,
1463                                         entry);
1464
1465                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1466                                               is_invalid_userptr);
1467                         if (ret) {
1468                                 pr_err("Failed to map bo to gpuvm\n");
1469                                 goto map_bo_to_gpuvm_failed;
1470                         }
1471
1472                         ret = vm_update_pds(avm, ctx.sync);
1473                         if (ret) {
1474                                 pr_err("Failed to update page directories\n");
1475                                 goto map_bo_to_gpuvm_failed;
1476                         }
1477
1478                         entry->is_mapped = true;
1479                         mem->mapped_to_gpu_memory++;
1480                         pr_debug("\t INC mapping count %d\n",
1481                                         mem->mapped_to_gpu_memory);
1482                 }
1483         }
1484
1485         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1486                 amdgpu_bo_fence(bo,
1487                                 &avm->process_info->eviction_fence->base,
1488                                 true);
1489         ret = unreserve_bo_and_vms(&ctx, false, false);
1490
1491         goto out;
1492
1493 map_bo_to_gpuvm_failed:
1494         if (bo_va_entry_aql)
1495                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1496 add_bo_to_vm_failed_aql:
1497         if (bo_va_entry)
1498                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1499 add_bo_to_vm_failed:
1500         unreserve_bo_and_vms(&ctx, false, false);
1501 out:
1502         mutex_unlock(&mem->process_info->lock);
1503         mutex_unlock(&mem->lock);
1504         return ret;
1505 }
1506
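/* Unmap a KFD BO from one GPUVM. The inverse of the map path above:
 * clears the PTEs of every mapping of this BO into @drm_priv's VM and
 * decrements mapped_to_gpu_memory. Once the BO is unmapped from all
 * VMs, its eviction fence is removed so that TTM is free to evict it.
 */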
1507 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1508                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1509 {
1510         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1511         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1512         struct amdkfd_process_info *process_info = avm->process_info;
1513         unsigned long bo_size = mem->bo->tbo.base.size;
1514         struct kfd_bo_va_list *entry;
1515         struct bo_vm_reservation_context ctx;
1516         int ret;
1517
1518         mutex_lock(&mem->lock);
1519
1520         ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1521         if (unlikely(ret))
1522                 goto out;
1523         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1524         if (ctx.n_vms == 0) {
1525                 ret = -EINVAL;
1526                 goto unreserve_out;
1527         }
1528
1529         ret = vm_validate_pt_pd_bos(avm);
1530         if (unlikely(ret))
1531                 goto unreserve_out;
1532
1533         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1534                 mem->va,
1535                 mem->va + bo_size * (1 + mem->aql_queue),
1536                 avm);
1537
1538         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1539                 if (entry->bo_va->base.vm == avm && entry->is_mapped) {
1540                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1541                                         entry->va,
1542                                         entry->va + bo_size,
1543                                         entry);
1544
1545                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1546                         if (ret) {
1547                                 pr_err("failed to unmap VA 0x%llx\n",
1548                                                 mem->va);
1549                                 goto unreserve_out;
1550                         }
1551
1552                         entry->is_mapped = false;
1553
1554                         mem->mapped_to_gpu_memory--;
1555                         pr_debug("\t DEC mapping count %d\n",
1556                                         mem->mapped_to_gpu_memory);
1557                 }
1558         }
1559
1560         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1561          * required.
1562          */
1563         if (mem->mapped_to_gpu_memory == 0 &&
1564             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1565             !mem->bo->tbo.pin_count)
1566                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1567                                                 process_info->eviction_fence);
1568
1569 unreserve_out:
1570         unreserve_bo_and_vms(&ctx, false, false);
1571 out:
1572         mutex_unlock(&mem->lock);
1573         return ret;
1574 }
1575
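/* Wait for all pending work on a KFD BO: the page-table updates and
 * moves accumulated in mem->sync. The sync object is cloned under
 * mem->lock so that the (possibly long) wait itself runs unlocked;
 * @intr selects an interruptible wait.
 *
 * Illustrative use (a sketch; the real callers are the KFD ioctls,
 * and the exact map signature is assumed here):
 *
 *     ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
 *     if (!ret)
 *             ret = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 */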
1576 int amdgpu_amdkfd_gpuvm_sync_memory(
1577                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1578 {
1579         struct amdgpu_sync sync;
1580         int ret;
1581
1582         amdgpu_sync_create(&sync);
1583
1584         mutex_lock(&mem->lock);
1585         amdgpu_sync_clone(&mem->sync, &sync);
1586         mutex_unlock(&mem->lock);
1587
1588         ret = amdgpu_sync_wait(&sync, intr);
1589         amdgpu_sync_free(&sync);
1590         return ret;
1591 }
1592
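/* Pin a GTT BO and map it into the kernel's address space. The BO is
 * taken off the KFD validate list and its eviction fence is removed,
 * since a pinned, kernel-mapped BO must never be moved or evicted. On
 * success *kptr holds the CPU address and, if requested, *size the BO
 * size.
 */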
1593 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1594                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1595 {
1596         int ret;
1597         struct amdgpu_bo *bo = mem->bo;
1598
1599         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1600                 pr_err("userptr can't be mapped to kernel\n");
1601                 return -EINVAL;
1602         }
1603
1604         /* Remove kgd_mem from the kfd_bo_list so the restore worker
1605          * does not re-validate this BO when restoring after an eviction.
1606          */
1607         mutex_lock(&mem->process_info->lock);
1608
1609         ret = amdgpu_bo_reserve(bo, true);
1610         if (ret) {
1611                 pr_err("Failed to reserve bo. ret %d\n", ret);
1612                 goto bo_reserve_failed;
1613         }
1614
1615         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1616         if (ret) {
1617                 pr_err("Failed to pin bo. ret %d\n", ret);
1618                 goto pin_failed;
1619         }
1620
1621         ret = amdgpu_bo_kmap(bo, kptr);
1622         if (ret) {
1623                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1624                 goto kmap_failed;
1625         }
1626
1627         amdgpu_amdkfd_remove_eviction_fence(
1628                 bo, mem->process_info->eviction_fence);
1629         list_del_init(&mem->validate_list.head);
1630
1631         if (size)
1632                 *size = amdgpu_bo_size(bo);
1633
1634         amdgpu_bo_unreserve(bo);
1635
1636         mutex_unlock(&mem->process_info->lock);
1637         return 0;
1638
1639 kmap_failed:
1640         amdgpu_bo_unpin(bo);
1641 pin_failed:
1642         amdgpu_bo_unreserve(bo);
1643 bo_reserve_failed:
1644         mutex_unlock(&mem->process_info->lock);
1645
1646         return ret;
1647 }
1648
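/* Copy the most recent GPU VM fault record to @mem. The
 * vm_fault_info_updated flag is cleared after the copy (the memory
 * barrier orders the copy before the clear), so each fault record is
 * consumed at most once.
 */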
1649 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1650                                               struct kfd_vm_fault_info *mem)
1651 {
1652         struct amdgpu_device *adev;
1653
1654         adev = (struct amdgpu_device *)kgd;
1655         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1656                 *mem = *adev->gmc.vm_fault_info;
1657                 mb();
1658                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1659         }
1660         return 0;
1661 }
1662
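/* Import a DMA-buf into KFD. Only buffers exported by this adev's own
 * amdgpu driver and backed by VRAM or GTT are accepted. The new
 * kgd_mem shares the underlying BO (a GEM reference is taken), so the
 * caller keeps responsibility for its own dma_buf reference.
 */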
1663 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1664                                       struct dma_buf *dma_buf,
1665                                       uint64_t va, void *drm_priv,
1666                                       struct kgd_mem **mem, uint64_t *size,
1667                                       uint64_t *mmap_offset)
1668 {
1669         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1670         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1671         struct drm_gem_object *obj;
1672         struct amdgpu_bo *bo;
1673         int ret;
1674
1675         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1676                 /* Can't handle non-graphics buffers */
1677                 return -EINVAL;
1678
1679         obj = dma_buf->priv;
1680         if (drm_to_adev(obj->dev) != adev)
1681                 /* Can't handle buffers from other devices */
1682                 return -EINVAL;
1683
1684         bo = gem_to_amdgpu_bo(obj);
1685         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1686                                     AMDGPU_GEM_DOMAIN_GTT)))
1687                 /* Only VRAM and GTT BOs are supported */
1688                 return -EINVAL;
1689
1690         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1691         if (!*mem)
1692                 return -ENOMEM;
1693
1694         ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1695         if (ret) {
1696                 kfree(*mem);
1697                 return ret;
1698         }
1699
1700         if (size)
1701                 *size = amdgpu_bo_size(bo);
1702
1703         if (mmap_offset)
1704                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1705
1706         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1707         mutex_init(&(*mem)->lock);
1708
1709         (*mem)->alloc_flags =
1710                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1711                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1712                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1713                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1714
1715         drm_gem_object_get(&bo->tbo.base);
1716         (*mem)->bo = bo;
1717         (*mem)->va = va;
1718         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1719                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1720         (*mem)->mapped_to_gpu_memory = 0;
1721         (*mem)->process_info = avm->process_info;
1722         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1723         amdgpu_sync_create(&(*mem)->sync);
1724         (*mem)->is_imported = true;
1725
1726         return 0;
1727 }
1728
1729 /* Evict a userptr BO by stopping the queues if necessary
1730  *
1731  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1732  * cannot do any memory allocations, and cannot take any locks that
1733  * are held elsewhere while allocating memory. Therefore this is as
1734  * simple as possible, using atomic counters.
1735  *
1736  * It doesn't do anything to the BO itself. The real work happens in
1737  * restore, where we get updated page addresses. This function only
1738  * ensures that GPU access to the BO is stopped.
1739  */
1740 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1741                                 struct mm_struct *mm)
1742 {
1743         struct amdkfd_process_info *process_info = mem->process_info;
1744         int evicted_bos;
1745         int r = 0;
1746
1747         atomic_inc(&mem->invalid);
1748         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1749         if (evicted_bos == 1) {
1750                 /* First eviction, stop the queues */
1751                 r = kgd2kfd_quiesce_mm(mm);
1752                 if (r)
1753                         pr_err("Failed to quiesce KFD\n");
1754                 schedule_delayed_work(&process_info->restore_userptr_work,
1755                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1756         }
1757
1758         return r;
1759 }
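/* The eviction/restore handshake in one place (an illustrative
 * summary of the functions below, not a new interface):
 *
 *     MMU notifier:   amdgpu_amdkfd_evict_userptr()
 *                       mem->invalid++; evicted_bos 0 -> 1 stops the
 *                       queues and schedules restore_userptr_work
 *     restore work:   update_invalid_user_pages()
 *                     validate_invalid_user_pages()
 *                     cmpxchg(evicted_bos, n, 0) and resume queues
 *
 * If another eviction races with the restore, the cmpxchg fails and
 * the worker simply reschedules itself.
 */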
1760
1761 /* Update invalid userptr BOs
1762  *
1763  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1764  * userptr_inval_list and updates user pages for all BOs that have
1765  * been invalidated since their last update.
1766  */
1767 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1768                                      struct mm_struct *mm)
1769 {
1770         struct kgd_mem *mem, *tmp_mem;
1771         struct amdgpu_bo *bo;
1772         struct ttm_operation_ctx ctx = { false, false };
1773         int invalid, ret;
1774
1775         /* Move all invalidated BOs to the userptr_inval_list and
1776          * release their user pages by migration to the CPU domain
1777          */
1778         list_for_each_entry_safe(mem, tmp_mem,
1779                                  &process_info->userptr_valid_list,
1780                                  validate_list.head) {
1781                 if (!atomic_read(&mem->invalid))
1782                         continue; /* BO is still valid */
1783
1784                 bo = mem->bo;
1785
1786                 if (amdgpu_bo_reserve(bo, true))
1787                         return -EAGAIN;
1788                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1789                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1790                 amdgpu_bo_unreserve(bo);
1791                 if (ret) {
1792                         pr_err("%s: Failed to invalidate userptr BO\n",
1793                                __func__);
1794                         return -EAGAIN;
1795                 }
1796
1797                 list_move_tail(&mem->validate_list.head,
1798                                &process_info->userptr_inval_list);
1799         }
1800
1801         if (list_empty(&process_info->userptr_inval_list))
1802                 return 0; /* All evicted userptr BOs were freed */
1803
1804         /* Go through userptr_inval_list and update any invalid user_pages */
1805         list_for_each_entry(mem, &process_info->userptr_inval_list,
1806                             validate_list.head) {
1807                 invalid = atomic_read(&mem->invalid);
1808                 if (!invalid)
1809                         /* BO hasn't been invalidated since the last
1810                          * revalidation attempt. Keep its BO list.
1811                          */
1812                         continue;
1813
1814                 bo = mem->bo;
1815
1816                 /* Get updated user pages */
1817                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1818                 if (ret) {
1819                         pr_debug("%s: Failed to get user pages: %d\n",
1820                                 __func__, ret);
1821
1822                         /* Return error -EBUSY or -ENOMEM, retry restore */
1823                         return ret;
1824                 }
1825
1826                 /*
1827                  * FIXME: Cannot ignore the return code, must hold
1828                  * notifier_lock
1829                  */
1830                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1831
1832                 /* Mark the BO as valid unless it was invalidated
1833                  * again concurrently.
1834                  */
1835                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1836                         return -EAGAIN;
1837         }
1838
1839         return 0;
1840 }
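/* Note on return codes: -EAGAIN (failed reserve/validate or a
 * concurrent re-invalidation) and the -EBUSY/-ENOMEM cases from
 * amdgpu_ttm_tt_get_user_pages both make the restore worker try
 * again later.
 */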
1841
1842 /* Validate invalid userptr BOs
1843  *
1844  * Validates BOs on the userptr_inval_list, and moves them back to the
1845  * userptr_valid_list. Also updates GPUVM page tables with new page
1846  * addresses and waits for the page table updates to complete.
1847  */
1848 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1849 {
1850         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1851         struct list_head resv_list, duplicates;
1852         struct ww_acquire_ctx ticket;
1853         struct amdgpu_sync sync;
1854
1855         struct amdgpu_vm *peer_vm;
1856         struct kgd_mem *mem, *tmp_mem;
1857         struct amdgpu_bo *bo;
1858         struct ttm_operation_ctx ctx = { false, false };
1859         int i, ret;
1860
1861         pd_bo_list_entries = kcalloc(process_info->n_vms,
1862                                      sizeof(struct amdgpu_bo_list_entry),
1863                                      GFP_KERNEL);
1864         if (!pd_bo_list_entries) {
1865                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1866                 ret = -ENOMEM;
1867                 goto out_no_mem;
1868         }
1869
1870         INIT_LIST_HEAD(&resv_list);
1871         INIT_LIST_HEAD(&duplicates);
1872
1873         /* Get all the page directory BOs that need to be reserved */
1874         i = 0;
1875         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1876                             vm_list_node)
1877                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1878                                     &pd_bo_list_entries[i++]);
1879         /* Add the userptr_inval_list entries to resv_list */
1880         list_for_each_entry(mem, &process_info->userptr_inval_list,
1881                             validate_list.head) {
1882                 list_add_tail(&mem->resv_list.head, &resv_list);
1883                 mem->resv_list.bo = mem->validate_list.bo;
1884                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1885         }
1886
1887         /* Reserve all BOs and page tables for validation */
1888         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1889         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1890         if (ret)
1891                 goto out_free;
1892
1893         amdgpu_sync_create(&sync);
1894
1895         ret = process_validate_vms(process_info);
1896         if (ret)
1897                 goto unreserve_out;
1898
1899         /* Validate BOs and update GPUVM page tables */
1900         list_for_each_entry_safe(mem, tmp_mem,
1901                                  &process_info->userptr_inval_list,
1902                                  validate_list.head) {
1903                 struct kfd_bo_va_list *bo_va_entry;
1904
1905                 bo = mem->bo;
1906
1907                 /* Validate the BO if we got user pages */
1908                 if (bo->tbo.ttm->pages[0]) {
1909                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1910                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1911                         if (ret) {
1912                                 pr_err("%s: failed to validate BO\n", __func__);
1913                                 goto unreserve_out;
1914                         }
1915                 }
1916
1917                 list_move_tail(&mem->validate_list.head,
1918                                &process_info->userptr_valid_list);
1919
1920                 /* Update mapping. If the BO was not validated
1921                  * (because we couldn't get user pages), this will
1922                  * clear the page table entries, which will result in
1923                  * VM faults if the GPU tries to access the invalid
1924                  * memory.
1925                  */
1926                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1927                         if (!bo_va_entry->is_mapped)
1928                                 continue;
1929
1930                         ret = update_gpuvm_pte((struct amdgpu_device *)
1931                                                bo_va_entry->kgd_dev,
1932                                                bo_va_entry, &sync);
1933                         if (ret) {
1934                                 pr_err("%s: update PTE failed\n", __func__);
1935                                 /* make sure this gets validated again */
1936                                 atomic_inc(&mem->invalid);
1937                                 goto unreserve_out;
1938                         }
1939                 }
1940         }
1941
1942         /* Update page directories */
1943         ret = process_update_pds(process_info, &sync);
1944
1945 unreserve_out:
1946         ttm_eu_backoff_reservation(&ticket, &resv_list);
1947         amdgpu_sync_wait(&sync, false);
1948         amdgpu_sync_free(&sync);
1949 out_free:
1950         kfree(pd_bo_list_entries);
1951 out_no_mem:
1952
1953         return ret;
1954 }
1955
1956 /* Worker callback to restore evicted userptr BOs
1957  *
1958  * Tries to update and validate all userptr BOs. If successful and no
1959  * concurrent evictions happened, the queues are restarted. Otherwise,
1960  * reschedule for another attempt later.
1961  */
1962 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1963 {
1964         struct delayed_work *dwork = to_delayed_work(work);
1965         struct amdkfd_process_info *process_info =
1966                 container_of(dwork, struct amdkfd_process_info,
1967                              restore_userptr_work);
1968         struct task_struct *usertask;
1969         struct mm_struct *mm;
1970         int evicted_bos;
1971
1972         evicted_bos = atomic_read(&process_info->evicted_bos);
1973         if (!evicted_bos)
1974                 return;
1975
1976         /* Reference task and mm in case of concurrent process termination */
1977         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
1978         if (!usertask)
1979                 return;
1980         mm = get_task_mm(usertask);
1981         if (!mm) {
1982                 put_task_struct(usertask);
1983                 return;
1984         }
1985
1986         mutex_lock(&process_info->lock);
1987
1988         if (update_invalid_user_pages(process_info, mm))
1989                 goto unlock_out;
1990         /* userptr_inval_list can be empty if all evicted userptr BOs
1991          * have been freed. In that case there is nothing to validate
1992          * and we can just restart the queues.
1993          */
1994         if (!list_empty(&process_info->userptr_inval_list)) {
1995                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
1996                         goto unlock_out; /* Concurrent eviction, try again */
1997
1998                 if (validate_invalid_user_pages(process_info))
1999                         goto unlock_out;
2000         }
2001         /* Final check for concurrent eviction and atomic update. If
2002          * another eviction happens after successful update, it will
2003          * be a first eviction that calls quiesce_mm. The eviction
2004          * reference counting inside KFD will handle this case.
2005          */
2006         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2007             evicted_bos)
2008                 goto unlock_out;
2009         evicted_bos = 0;
2010         if (kgd2kfd_resume_mm(mm)) {
2011                 pr_err("%s: Failed to resume KFD\n", __func__);
2012                 /* No recovery from this failure. Probably the CP is
2013                  * hanging. No point trying again.
2014                  */
2015         }
2016
2017 unlock_out:
2018         mutex_unlock(&process_info->lock);
2019         mmput(mm);
2020         put_task_struct(usertask);
2021
2022         /* If validation failed, reschedule another attempt */
2023         if (evicted_bos)
2024                 schedule_delayed_work(&process_info->restore_userptr_work,
2025                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2026 }
2027
2028 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2029  *   KFD process identified by process_info
2030  *
2031  * @process_info: amdkfd_process_info of the KFD process
2032  *
2033  * After a memory eviction, the restore thread calls this function. It must
2034  * be called while the process is still valid. BO restore involves:
2035  *
2036  * 1.  Release the old eviction fence and create a new one
2037  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2038  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2039  *     BOs that need to be reserved.
2040  * 4.  Reserve all the BOs
2041  * 5.  Validate the PD and PT BOs.
2042  * 6.  Validate all KFD BOs on kfd_bo_list, map them and add the new fence
2043  * 7.  Add the fence to all PD and PT BOs.
2044  * 8.  Unreserve all BOs
2045  */
2046 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2047 {
2048         struct amdgpu_bo_list_entry *pd_bo_list;
2049         struct amdkfd_process_info *process_info = info;
2050         struct amdgpu_vm *peer_vm;
2051         struct kgd_mem *mem;
2052         struct bo_vm_reservation_context ctx;
2053         struct amdgpu_amdkfd_fence *new_fence;
2054         int ret = 0, i;
2055         struct list_head duplicate_save;
2056         struct amdgpu_sync sync_obj;
2057         unsigned long failed_size = 0;
2058         unsigned long total_size = 0;
2059
2060         INIT_LIST_HEAD(&duplicate_save);
2061         INIT_LIST_HEAD(&ctx.list);
2062         INIT_LIST_HEAD(&ctx.duplicates);
2063
2064         pd_bo_list = kcalloc(process_info->n_vms,
2065                              sizeof(struct amdgpu_bo_list_entry),
2066                              GFP_KERNEL);
2067         if (!pd_bo_list)
2068                 return -ENOMEM;
2069
2070         i = 0;
2071         mutex_lock(&process_info->lock);
2072         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2073                         vm_list_node)
2074                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2075
2076         /* Reserve all BOs and page tables/directory. Add all BOs from
2077          * kfd_bo_list to ctx.list
2078          */
2079         list_for_each_entry(mem, &process_info->kfd_bo_list,
2080                             validate_list.head) {
2081
2082                 list_add_tail(&mem->resv_list.head, &ctx.list);
2083                 mem->resv_list.bo = mem->validate_list.bo;
2084                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2085         }
2086
2087         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2088                                      false, &duplicate_save);
2089         if (ret) {
2090                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2091                 goto ttm_reserve_fail;
2092         }
2093
2094         amdgpu_sync_create(&sync_obj);
2095
2096         /* Validate PDs and PTs */
2097         ret = process_validate_vms(process_info);
2098         if (ret)
2099                 goto validate_map_fail;
2100
2101         ret = process_sync_pds_resv(process_info, &sync_obj);
2102         if (ret) {
2103                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2104                 goto validate_map_fail;
2105         }
2106
2107         /* Validate BOs and map them to GPUVM (update VM page tables). */
2108         list_for_each_entry(mem, &process_info->kfd_bo_list,
2109                             validate_list.head) {
2110
2111                 struct amdgpu_bo *bo = mem->bo;
2112                 uint32_t domain = mem->domain;
2113                 struct kfd_bo_va_list *bo_va_entry;
2114
2115                 total_size += amdgpu_bo_size(bo);
2116
2117                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2118                 if (ret) {
2119                         pr_debug("Memory eviction: Validate BOs failed\n");
2120                         failed_size += amdgpu_bo_size(bo);
2121                         ret = amdgpu_amdkfd_bo_validate(bo,
2122                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2123                         if (ret) {
2124                                 pr_debug("Memory eviction: Try again\n");
2125                                 goto validate_map_fail;
2126                         }
2127                 }
2128                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2129                 if (ret) {
2130                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2131                         goto validate_map_fail;
2132                 }
2133                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2134                                     bo_list) {
2135                         ret = update_gpuvm_pte((struct amdgpu_device *)
2136                                               bo_va_entry->kgd_dev,
2137                                               bo_va_entry,
2138                                               &sync_obj);
2139                         if (ret) {
2140                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2141                                 goto validate_map_fail;
2142                         }
2143                 }
2144         }
2145
2146         if (failed_size)
2147                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2148
2149         /* Update page directories */
2150         ret = process_update_pds(process_info, &sync_obj);
2151         if (ret) {
2152                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2153                 goto validate_map_fail;
2154         }
2155
2156         /* Wait for validate and PT updates to finish */
2157         amdgpu_sync_wait(&sync_obj, false);
2158
2159         /* Release old eviction fence and create new one, because fence only
2160          * goes from unsignaled to signaled, fence cannot be reused.
2161          * Use context and mm from the old fence.
2162          */
2163         new_fence = amdgpu_amdkfd_fence_create(
2164                                 process_info->eviction_fence->base.context,
2165                                 process_info->eviction_fence->mm,
2166                                 NULL);
2167         if (!new_fence) {
2168                 pr_err("Failed to create eviction fence\n");
2169                 ret = -ENOMEM;
2170                 goto validate_map_fail;
2171         }
2172         dma_fence_put(&process_info->eviction_fence->base);
2173         process_info->eviction_fence = new_fence;
2174         *ef = dma_fence_get(&new_fence->base);
2175
2176         /* Attach new eviction fence to all BOs */
2177         list_for_each_entry(mem, &process_info->kfd_bo_list,
2178                 validate_list.head)
2179                 amdgpu_bo_fence(mem->bo,
2180                         &process_info->eviction_fence->base, true);
2181
2182         /* Attach eviction fence to PD / PT BOs */
2183         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2184                             vm_list_node) {
2185                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2186
2187                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2188         }
2189
2190 validate_map_fail:
2191         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2192         amdgpu_sync_free(&sync_obj);
2193 ttm_reserve_fail:
2194         mutex_unlock(&process_info->lock);
2195         kfree(pd_bo_list);
2196         return ret;
2197 }
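/* A sketch of how this is driven (the actual caller is KFD's restore
 * worker; the process_info and ef variables here are illustrative):
 *
 *     struct dma_fence *ef;
 *     int ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, &ef);
 *
 * On success *ef holds a reference to the newly created eviction
 * fence; the reference to the previous fence has already been
 * dropped.
 */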
2198
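/* Add the GWS BO to a KFD process: wrap it in a kgd_mem, validate it
 * once in the GWS domain and attach the process eviction fence so
 * that amdgpu and amdkfd can evict each other's work.
 */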
2199 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2200 {
2201         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2202         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2203         int ret;
2204
2205         if (!info || !gws)
2206                 return -EINVAL;
2207
2208         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2209         if (!*mem)
2210                 return -ENOMEM;
2211
2212         mutex_init(&(*mem)->lock);
2213         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2214         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2215         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2216         (*mem)->process_info = process_info;
2217         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2218         amdgpu_sync_create(&(*mem)->sync);
2219
2220
2221         /* Validate gws bo the first time it is added to process */
2222         mutex_lock(&(*mem)->process_info->lock);
2223         ret = amdgpu_bo_reserve(gws_bo, false);
2224         if (unlikely(ret)) {
2225                 pr_err("Reserve gws bo failed %d\n", ret);
2226                 goto bo_reservation_failure;
2227         }
2228
2229         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2230         if (ret) {
2231                 pr_err("GWS BO validate failed %d\n", ret);
2232                 goto bo_validation_failure;
2233         }
2234         /* The GWS resource is shared between amdgpu and amdkfd. Add the
2235          * process eviction fence to the BO so that they can evict each
2236          * other's work.
2237          */
2238         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2239         if (ret)
2240                 goto reserve_shared_fail;
2241         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2242         amdgpu_bo_unreserve(gws_bo);
2243         mutex_unlock(&(*mem)->process_info->lock);
2244
2245         return ret;
2246
2247 reserve_shared_fail:
2248 bo_validation_failure:
2249         amdgpu_bo_unreserve(gws_bo);
2250 bo_reservation_failure:
2251         mutex_unlock(&(*mem)->process_info->lock);
2252         amdgpu_sync_free(&(*mem)->sync);
2253         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2254         amdgpu_bo_unref(&gws_bo);
2255         mutex_destroy(&(*mem)->lock);
2256         kfree(*mem);
2257         *mem = NULL;
2258         return ret;
2259 }
2260
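/* Undo amdgpu_amdkfd_add_gws_to_process: take the kgd_mem off the
 * process validate list so the restore worker ignores it, remove the
 * eviction fence, drop this wrapper's BO reference and free the
 * wrapper itself.
 */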
2261 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2262 {
2263         int ret;
2264         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2265         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2266         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2267
2268         /* Remove BO from process's validate list so restore worker won't touch
2269          * it anymore
2270          */
2271         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2272
2273         ret = amdgpu_bo_reserve(gws_bo, false);
2274         if (unlikely(ret)) {
2275                 pr_err("Reserve gws bo failed %d\n", ret);
2276                 /* TODO: add BO back to validate_list? */
2277                 return ret;
2278         }
2279         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2280                         process_info->eviction_fence);
2281         amdgpu_bo_unreserve(gws_bo);
2282         amdgpu_sync_free(&kgd_mem->sync);
2283         amdgpu_bo_unref(&gws_bo);
2284         mutex_destroy(&kgd_mem->lock);
2285         kfree(mem);
2286         return 0;
2287 }
2288
2289 /* Returns GPU-specific tiling mode information */
2290 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2291                                 struct tile_config *config)
2292 {
2293         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2294
2295         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2296         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2297         config->num_tile_configs =
2298                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2299         config->macro_tile_config_ptr =
2300                         adev->gfx.config.macrotile_mode_array;
2301         config->num_macro_tile_configs =
2302                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2303
2304         /* Those values are not set from GFX9 onwards */
2305         config->num_banks = adev->gfx.config.num_banks;
2306         config->num_ranks = adev->gfx.config.num_ranks;
2307
2308         return 0;
2309 }