1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* Userptr restore delay, just long enough to allow consecutive VM
37  * changes to accumulate
38  */
39 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
40
41 /* Impose limit on how much memory KFD can use */
42 static struct {
43         uint64_t max_system_mem_limit;
44         uint64_t max_ttm_mem_limit;
45         int64_t system_mem_used;
46         int64_t ttm_mem_used;
47         spinlock_t mem_limit_lock;
48 } kfd_mem_limit;
49
50 static const char * const domain_bit_to_string[] = {
51                 "CPU",
52                 "GTT",
53                 "VRAM",
54                 "GDS",
55                 "GWS",
56                 "OA"
57 };
58
59 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
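/* For example, assuming the usual single-bit domain flags from amdgpu_drm.h
 * (CPU = 0x1, GTT = 0x2, VRAM = 0x4, GDS = 0x8, GWS = 0x10, OA = 0x20),
 * domain_string(AMDGPU_GEM_DOMAIN_VRAM) resolves to
 * domain_bit_to_string[ffs(0x4) - 1] == domain_bit_to_string[2] == "VRAM".
 */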
60
61 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
62
63
64 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
65 {
66         return (struct amdgpu_device *)kgd;
67 }
68
69 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
70                 struct kgd_mem *mem)
71 {
72         struct kfd_mem_attachment *entry;
73
74         list_for_each_entry(entry, &mem->attachments, list)
75                 if (entry->bo_va->base.vm == avm)
76                         return true;
77
78         return false;
79 }
80
81 /* Set memory usage limits. Currently, the limits are
82  *  System (TTM + userptr) memory - 15/16th System RAM
83  *  TTM memory - 3/8th System RAM
84  */
85 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
86 {
87         struct sysinfo si;
88         uint64_t mem;
89
90         si_meminfo(&si);
91         mem = si.freeram - si.freehigh;
92         mem *= si.mem_unit;
93
94         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
95         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
96         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
97         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
98                 (kfd_mem_limit.max_system_mem_limit >> 20),
99                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
100 }
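/* Worked example for the fractions above: with 64 GiB of usable memory,
 * max_system_mem_limit = mem - mem/16 = 15/16 * 64 GiB = 60 GiB, and
 * max_ttm_mem_limit = mem/2 - mem/8 = 3/8 * 64 GiB = 24 GiB.
 */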
101
102 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
103 {
104         kfd_mem_limit.system_mem_used += size;
105 }
106
107 /* Estimate page table size needed to represent a given memory size
108  *
109  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
110  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
111  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
112  * for 2MB pages for TLB efficiency. However, small allocations and
113  * fragmented system memory still need some 4KB pages. We choose a
114  * compromise that should work in most cases without reserving too
115  * much memory for page tables unnecessarily (factor 16K, >> 14).
116  */
117 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
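/* Worked example: for 64 GiB of memory, ESTIMATE_PT_SIZE(2^36) = 2^36 >> 14
 * = 4 MiB reserved for page tables, i.e. roughly one 8-byte PTE per 128 KiB
 * of memory on average, in between the 4 KiB (>> 9) and 2 MiB (>> 18) extremes.
 */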
118
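/* TTM accounting overhead for a BO of the given size: the amdgpu_bo and
 * ttm_tt structures (rounded up to powers of two) plus the page-aligned
 * per-page dma_addr_t and page-pointer arrays.
 */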
119 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
120 {
121         size >>= PAGE_SHIFT;
122         size *= sizeof(dma_addr_t) + sizeof(void *);
123
124         return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
125                 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
126                 PAGE_ALIGN(size);
127 }
128
129 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
130                 uint64_t size, u32 domain, bool sg)
131 {
132         uint64_t reserved_for_pt =
133                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
134         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
135         int ret = 0;
136
137         acc_size = amdgpu_amdkfd_acc_size(size);
138
139         vram_needed = 0;
140         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
141                 /* TTM GTT memory */
142                 system_mem_needed = acc_size + size;
143                 ttm_mem_needed = acc_size + size;
144         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
145                 /* Userptr */
146                 system_mem_needed = acc_size + size;
147                 ttm_mem_needed = acc_size;
148         } else {
149                 /* VRAM and SG */
150                 system_mem_needed = acc_size;
151                 ttm_mem_needed = acc_size;
152                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
153                         vram_needed = size;
154         }
155
156         spin_lock(&kfd_mem_limit.mem_limit_lock);
157
158         if (kfd_mem_limit.system_mem_used + system_mem_needed >
159             kfd_mem_limit.max_system_mem_limit)
160                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
161
162         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
163              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
164             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
165              kfd_mem_limit.max_ttm_mem_limit) ||
166             (adev->kfd.vram_used + vram_needed >
167              adev->gmc.real_vram_size - reserved_for_pt)) {
168                 ret = -ENOMEM;
169         } else {
170                 kfd_mem_limit.system_mem_used += system_mem_needed;
171                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
172                 adev->kfd.vram_used += vram_needed;
173         }
174
175         spin_unlock(&kfd_mem_limit.mem_limit_lock);
176         return ret;
177 }
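/* Accounting summary for the cases above, with acc = amdgpu_amdkfd_acc_size():
 *   GTT BO:          system_mem += acc + size, ttm_mem += acc + size
 *   userptr BO:      system_mem += acc + size, ttm_mem += acc
 *   VRAM BO:         system_mem += acc,        ttm_mem += acc, vram += size
 *   doorbell/SG BO:  system_mem += acc,        ttm_mem += acc
 * unreserve_mem_limit() below reverses the same amounts on release.
 */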
178
179 static void unreserve_mem_limit(struct amdgpu_device *adev,
180                 uint64_t size, u32 domain, bool sg)
181 {
182         size_t acc_size;
183
184         acc_size = amdgpu_amdkfd_acc_size(size);
185
186         spin_lock(&kfd_mem_limit.mem_limit_lock);
187         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
188                 kfd_mem_limit.system_mem_used -= (acc_size + size);
189                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
190         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
191                 kfd_mem_limit.system_mem_used -= (acc_size + size);
192                 kfd_mem_limit.ttm_mem_used -= acc_size;
193         } else {
194                 kfd_mem_limit.system_mem_used -= acc_size;
195                 kfd_mem_limit.ttm_mem_used -= acc_size;
196                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
197                         adev->kfd.vram_used -= size;
198                         WARN_ONCE(adev->kfd.vram_used < 0,
199                                   "kfd VRAM memory accounting unbalanced");
200                 }
201         }
202         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
203                   "kfd system memory accounting unbalanced");
204         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
205                   "kfd TTM memory accounting unbalanced");
206
207         spin_unlock(&kfd_mem_limit.mem_limit_lock);
208 }
209
210 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
211 {
212         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
213         u32 domain = bo->preferred_domains;
214         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
215
216         if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) {
217                 domain = AMDGPU_GEM_DOMAIN_CPU;
218                 sg = false;
219         }
220
221         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
222
223         kfree(bo->kfd_bo);
224 }
225
226
227 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
228  *  reservation object.
229  *
230  * @bo: [IN] Remove eviction fence(s) from this BO
231  * @ef: [IN] This eviction fence is removed if it
232  *  is present in the shared list.
233  *
234  * NOTE: Must be called with the BO reserved, i.e. with bo->tbo.base.resv locked.
235  */
236 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
237                                         struct amdgpu_amdkfd_fence *ef)
238 {
239         struct dma_resv *resv = bo->tbo.base.resv;
240         struct dma_resv_list *old, *new;
241         unsigned int i, j, k;
242
243         if (!ef)
244                 return -EINVAL;
245
246         old = dma_resv_shared_list(resv);
247         if (!old)
248                 return 0;
249
250         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
251         if (!new)
252                 return -ENOMEM;
253
254         /* Go through all the shared fences in the reservation object and sort
255          * the interesting ones to the end of the list.
256          */
257         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
258                 struct dma_fence *f;
259
260                 f = rcu_dereference_protected(old->shared[i],
261                                               dma_resv_held(resv));
262
263                 if (f->context == ef->base.context)
264                         RCU_INIT_POINTER(new->shared[--j], f);
265                 else
266                         RCU_INIT_POINTER(new->shared[k++], f);
267         }
268         new->shared_max = old->shared_max;
269         new->shared_count = k;
270
271         /* Install the new fence list, seqcount provides the barriers */
272         write_seqcount_begin(&resv->seq);
273         RCU_INIT_POINTER(resv->fence, new);
274         write_seqcount_end(&resv->seq);
275
276         /* Drop the references to the removed fences or move them to ef_list */
277         for (i = j; i < old->shared_count; ++i) {
278                 struct dma_fence *f;
279
280                 f = rcu_dereference_protected(new->shared[i],
281                                               dma_resv_held(resv));
282                 dma_fence_put(f);
283         }
284         kfree_rcu(old, rcu);
285
286         return 0;
287 }
288
289 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
290 {
291         struct amdgpu_bo *root = bo;
292         struct amdgpu_vm_bo_base *vm_bo;
293         struct amdgpu_vm *vm;
294         struct amdkfd_process_info *info;
295         struct amdgpu_amdkfd_fence *ef;
296         int ret;
297
298         /* We can always get vm_bo from the root PD BO. */
299         while (root->parent)
300                 root = root->parent;
301
302         vm_bo = root->vm_bo;
303         if (!vm_bo)
304                 return 0;
305
306         vm = vm_bo->vm;
307         if (!vm)
308                 return 0;
309
310         info = vm->process_info;
311         if (!info || !info->eviction_fence)
312                 return 0;
313
314         ef = container_of(dma_fence_get(&info->eviction_fence->base),
315                         struct amdgpu_amdkfd_fence, base);
316
317         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
318         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
319         dma_resv_unlock(bo->tbo.base.resv);
320
321         dma_fence_put(&ef->base);
322         return ret;
323 }
324
325 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
326                                      bool wait)
327 {
328         struct ttm_operation_ctx ctx = { false, false };
329         int ret;
330
331         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
332                  "Called with userptr BO"))
333                 return -EINVAL;
334
335         amdgpu_bo_placement_from_domain(bo, domain);
336
337         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
338         if (ret)
339                 goto validate_fail;
340         if (wait)
341                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
342
343 validate_fail:
344         return ret;
345 }
346
347 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
348 {
349         return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
350 }
351
352 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
353  *
354  * Page directories are not updated here because huge page handling
355  * during page table updates can invalidate page directory entries
356  * again. Page directories are only updated after updating page
357  * tables.
358  */
359 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
360 {
361         struct amdgpu_bo *pd = vm->root.bo;
362         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
363         int ret;
364
365         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
366         if (ret) {
367                 pr_err("failed to validate PT BOs\n");
368                 return ret;
369         }
370
371         ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
372         if (ret) {
373                 pr_err("failed to validate PD\n");
374                 return ret;
375         }
376
377         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
378
379         if (vm->use_cpu_for_update) {
380                 ret = amdgpu_bo_kmap(pd, NULL);
381                 if (ret) {
382                         pr_err("failed to kmap PD, ret=%d\n", ret);
383                         return ret;
384                 }
385         }
386
387         return 0;
388 }
389
390 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
391 {
392         struct amdgpu_bo *pd = vm->root.bo;
393         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
394         int ret;
395
396         ret = amdgpu_vm_update_pdes(adev, vm, false);
397         if (ret)
398                 return ret;
399
400         return amdgpu_sync_fence(sync, vm->last_update);
401 }
402
403 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
404 {
405         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
406         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
407         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
408         uint32_t mapping_flags;
409         uint64_t pte_flags;
410         bool snoop = false;
411
412         mapping_flags = AMDGPU_VM_PAGE_READABLE;
413         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
414                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
415         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
416                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
417
418         switch (adev->asic_type) {
419         case CHIP_ARCTURUS:
420                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
421                         if (bo_adev == adev)
422                                 mapping_flags |= coherent ?
423                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
424                         else
425                                 mapping_flags |= coherent ?
426                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
427                 } else {
428                         mapping_flags |= coherent ?
429                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
430                 }
431                 break;
432         case CHIP_ALDEBARAN:
433                 if (coherent && uncached) {
434                         if (adev->gmc.xgmi.connected_to_cpu ||
435                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
436                                 snoop = true;
437                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
438                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
439                         if (bo_adev == adev) {
440                                 mapping_flags |= coherent ?
441                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
442                                 if (adev->gmc.xgmi.connected_to_cpu)
443                                         snoop = true;
444                         } else {
445                                 mapping_flags |= coherent ?
446                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
447                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
448                                         snoop = true;
449                         }
450                 } else {
451                         snoop = true;
452                         mapping_flags |= coherent ?
453                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
454                 }
455                 break;
456         default:
457                 mapping_flags |= coherent ?
458                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
459         }
460
461         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
462         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
463
464         return pte_flags;
465 }
466
467 static int
468 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
469                        struct kfd_mem_attachment *attachment)
470 {
471         enum dma_data_direction direction =
472                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
473                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
474         struct ttm_operation_ctx ctx = {.interruptible = true};
475         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
476         struct amdgpu_device *adev = attachment->adev;
477         struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
478         struct ttm_tt *ttm = bo->tbo.ttm;
479         int ret;
480
481         if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
482                 return -EINVAL;
483
484         ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
485         if (unlikely(!ttm->sg))
486                 return -ENOMEM;
487
488         /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
489         ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
490                                         ttm->num_pages, 0,
491                                         (u64)ttm->num_pages << PAGE_SHIFT,
492                                         GFP_KERNEL);
493         if (unlikely(ret))
494                 goto free_sg;
495
496         ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
497         if (unlikely(ret))
498                 goto release_sg;
499
500         drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address,
501                                        ttm->num_pages);
502
503         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
504         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
505         if (ret)
506                 goto unmap_sg;
507
508         return 0;
509
510 unmap_sg:
511         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
512 release_sg:
513         pr_err("DMA map userptr failed: %d\n", ret);
514         sg_free_table(ttm->sg);
515 free_sg:
516         kfree(ttm->sg);
517         ttm->sg = NULL;
518         return ret;
519 }
520
521 static int
522 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
523 {
524         struct ttm_operation_ctx ctx = {.interruptible = true};
525         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
526
527         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
528         return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
529 }
530
531 static int
532 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
533                           struct kfd_mem_attachment *attachment)
534 {
535         switch (attachment->type) {
536         case KFD_MEM_ATT_SHARED:
537                 return 0;
538         case KFD_MEM_ATT_USERPTR:
539                 return kfd_mem_dmamap_userptr(mem, attachment);
540         case KFD_MEM_ATT_DMABUF:
541                 return kfd_mem_dmamap_dmabuf(attachment);
542         default:
543                 WARN_ON_ONCE(1);
544         }
545         return -EINVAL;
546 }
547
548 static void
549 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
550                          struct kfd_mem_attachment *attachment)
551 {
552         enum dma_data_direction direction =
553                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
554                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
555         struct ttm_operation_ctx ctx = {.interruptible = false};
556         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
557         struct amdgpu_device *adev = attachment->adev;
558         struct ttm_tt *ttm = bo->tbo.ttm;
559
560         if (unlikely(!ttm->sg))
561                 return;
562
563         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
564         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
565
566         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
567         sg_free_table(ttm->sg);
568         kfree(ttm->sg);
569         ttm->sg = NULL;
570 }
571
572 static void
573 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
574 {
575         struct ttm_operation_ctx ctx = {.interruptible = true};
576         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
577
578         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
579         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
580 }
581
582 static void
583 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
584                             struct kfd_mem_attachment *attachment)
585 {
586         switch (attachment->type) {
587         case KFD_MEM_ATT_SHARED:
588                 break;
589         case KFD_MEM_ATT_USERPTR:
590                 kfd_mem_dmaunmap_userptr(mem, attachment);
591                 break;
592         case KFD_MEM_ATT_DMABUF:
593                 kfd_mem_dmaunmap_dmabuf(attachment);
594                 break;
595         default:
596                 WARN_ON_ONCE(1);
597         }
598 }
599
600 static int
601 kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
602                        struct amdgpu_bo **bo)
603 {
604         unsigned long bo_size = mem->bo->tbo.base.size;
605         struct drm_gem_object *gobj;
606         int ret;
607
608         ret = amdgpu_bo_reserve(mem->bo, false);
609         if (ret)
610                 return ret;
611
612         ret = amdgpu_gem_object_create(adev, bo_size, 1,
613                                        AMDGPU_GEM_DOMAIN_CPU,
614                                        AMDGPU_GEM_CREATE_PREEMPTIBLE,
615                                        ttm_bo_type_sg, mem->bo->tbo.base.resv,
616                                        &gobj);
617         amdgpu_bo_unreserve(mem->bo);
618         if (ret)
619                 return ret;
620
621         *bo = gem_to_amdgpu_bo(gobj);
622         (*bo)->parent = amdgpu_bo_ref(mem->bo);
623
624         return 0;
625 }
626
627 static int
628 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
629                       struct amdgpu_bo **bo)
630 {
631         struct drm_gem_object *gobj;
632         int ret;
633
634         if (!mem->dmabuf) {
635                 mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
636                         mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
637                                 DRM_RDWR : 0);
638                 if (IS_ERR(mem->dmabuf)) {
639                         ret = PTR_ERR(mem->dmabuf);
640                         mem->dmabuf = NULL;
641                         return ret;
642                 }
643         }
644
645         gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
646         if (IS_ERR(gobj))
647                 return PTR_ERR(gobj);
648
649         /* Import takes an extra reference on the dmabuf. Drop it now to
650          * avoid leaking it. We only need the one reference in
651          * kgd_mem->dmabuf.
652          */
653         dma_buf_put(mem->dmabuf);
654
655         *bo = gem_to_amdgpu_bo(gobj);
656         (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
657         (*bo)->parent = amdgpu_bo_ref(mem->bo);
658
659         return 0;
660 }
661
662 /* kfd_mem_attach - Add a BO to a VM
663  *
664  * Everything that needs to be done only once when a BO is first added
665  * to a VM. It can later be mapped and unmapped many times without
666  * repeating these steps.
667  *
668  * 0. Create BO for DMA mapping, if needed
669  * 1. Allocate and initialize BO VA entry data structure
670  * 2. Add BO to the VM
671  * 3. Determine ASIC-specific PTE flags
672  * 4. Alloc page tables and directories if needed
673  * 4a.  Validate new page tables and directories
674  */
675 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
676                 struct amdgpu_vm *vm, bool is_aql)
677 {
678         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
679         unsigned long bo_size = mem->bo->tbo.base.size;
680         uint64_t va = mem->va;
681         struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
682         struct amdgpu_bo *bo[2] = {NULL, NULL};
683         int i, ret;
684
685         if (!va) {
686                 pr_err("Invalid VA when adding BO to VM\n");
687                 return -EINVAL;
688         }
689
690         for (i = 0; i <= is_aql; i++) {
691                 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
692                 if (unlikely(!attachment[i])) {
693                         ret = -ENOMEM;
694                         goto unwind;
695                 }
696
697                 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
698                          va + bo_size, vm);
699
700                 if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM &&
701                                         amdgpu_xgmi_same_hive(adev, bo_adev))) {
702                         /* Mappings on the local GPU and VRAM mappings in the
703                          * local hive share the original BO
704                          */
705                         attachment[i]->type = KFD_MEM_ATT_SHARED;
706                         bo[i] = mem->bo;
707                         drm_gem_object_get(&bo[i]->tbo.base);
708                 } else if (i > 0) {
709                         /* Multiple mappings on the same GPU share the BO */
710                         attachment[i]->type = KFD_MEM_ATT_SHARED;
711                         bo[i] = bo[0];
712                         drm_gem_object_get(&bo[i]->tbo.base);
713                 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
714                         /* Create an SG BO to DMA-map userptrs on other GPUs */
715                         attachment[i]->type = KFD_MEM_ATT_USERPTR;
716                         ret = kfd_mem_attach_userptr(adev, mem, &bo[i]);
717                         if (ret)
718                                 goto unwind;
719                 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT &&
720                            mem->bo->tbo.type != ttm_bo_type_sg) {
721                         /* GTT BOs use DMA-mapping ability of dynamic-attach
722                          * DMA bufs. TODO: The same should work for VRAM on
723                          * large-BAR GPUs.
724                          */
725                         attachment[i]->type = KFD_MEM_ATT_DMABUF;
726                         ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
727                         if (ret)
728                                 goto unwind;
729                 } else {
730                         /* FIXME: Need to DMA-map other BO types:
731                          * large-BAR VRAM, doorbells, MMIO remap
732                          */
733                         attachment[i]->type = KFD_MEM_ATT_SHARED;
734                         bo[i] = mem->bo;
735                         drm_gem_object_get(&bo[i]->tbo.base);
736                 }
737
738                 /* Add BO to VM internal data structures */
739                 ret = amdgpu_bo_reserve(bo[i], false);
740                 if (ret) {
741                         pr_debug("Unable to reserve BO during memory attach\n");
742                         goto unwind;
743                 }
744                 attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
745                 amdgpu_bo_unreserve(bo[i]);
746                 if (unlikely(!attachment[i]->bo_va)) {
747                         ret = -ENOMEM;
748                         pr_err("Failed to add BO object to VM. ret == %d\n",
749                                ret);
750                         goto unwind;
751                 }
752                 attachment[i]->va = va;
753                 attachment[i]->pte_flags = get_pte_flags(adev, mem);
754                 attachment[i]->adev = adev;
755                 list_add(&attachment[i]->list, &mem->attachments);
756
757                 va += bo_size;
758         }
759
760         return 0;
761
762 unwind:
763         for (; i >= 0; i--) {
764                 if (!attachment[i])
765                         continue;
766                 if (attachment[i]->bo_va) {
767                         amdgpu_bo_reserve(bo[i], true);
768                         amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va);
769                         amdgpu_bo_unreserve(bo[i]);
770                         list_del(&attachment[i]->list);
771                 }
772                 if (bo[i])
773                         drm_gem_object_put(&bo[i]->tbo.base);
774                 kfree(attachment[i]);
775         }
776         return ret;
777 }
778
779 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
780 {
781         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
782
783         pr_debug("\t remove VA 0x%llx in entry %p\n",
784                         attachment->va, attachment);
785         amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
786         drm_gem_object_put(&bo->tbo.base);
787         list_del(&attachment->list);
788         kfree(attachment);
789 }
790
791 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
792                                 struct amdkfd_process_info *process_info,
793                                 bool userptr)
794 {
795         struct ttm_validate_buffer *entry = &mem->validate_list;
796         struct amdgpu_bo *bo = mem->bo;
797
798         INIT_LIST_HEAD(&entry->head);
799         entry->num_shared = 1;
800         entry->bo = &bo->tbo;
801         mutex_lock(&process_info->lock);
802         if (userptr)
803                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
804         else
805                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
806         mutex_unlock(&process_info->lock);
807 }
808
809 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
810                 struct amdkfd_process_info *process_info)
811 {
812         struct ttm_validate_buffer *bo_list_entry;
813
814         bo_list_entry = &mem->validate_list;
815         mutex_lock(&process_info->lock);
816         list_del(&bo_list_entry->head);
817         mutex_unlock(&process_info->lock);
818 }
819
820 /* Initializes user pages. It registers the MMU notifier and validates
821  * the userptr BO in the GTT domain.
822  *
823  * The BO must already be on the userptr_valid_list. Otherwise an
824  * eviction and restore may happen that leaves the new BO unmapped
825  * with the user mode queues running.
826  *
827  * Takes the process_info->lock to protect against concurrent restore
828  * workers.
829  *
830  * Returns 0 for success, negative errno for errors.
831  */
832 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
833 {
834         struct amdkfd_process_info *process_info = mem->process_info;
835         struct amdgpu_bo *bo = mem->bo;
836         struct ttm_operation_ctx ctx = { true, false };
837         int ret = 0;
838
839         mutex_lock(&process_info->lock);
840
841         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
842         if (ret) {
843                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
844                 goto out;
845         }
846
847         ret = amdgpu_mn_register(bo, user_addr);
848         if (ret) {
849                 pr_err("%s: Failed to register MMU notifier: %d\n",
850                        __func__, ret);
851                 goto out;
852         }
853
854         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
855         if (ret) {
856                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
857                 goto unregister_out;
858         }
859
860         ret = amdgpu_bo_reserve(bo, true);
861         if (ret) {
862                 pr_err("%s: Failed to reserve BO\n", __func__);
863                 goto release_out;
864         }
865         amdgpu_bo_placement_from_domain(bo, mem->domain);
866         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
867         if (ret)
868                 pr_err("%s: failed to validate BO\n", __func__);
869         amdgpu_bo_unreserve(bo);
870
871 release_out:
872         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
873 unregister_out:
874         if (ret)
875                 amdgpu_mn_unregister(bo);
876 out:
877         mutex_unlock(&process_info->lock);
878         return ret;
879 }
880
881 /* Reserving a BO and its page table BOs must happen atomically to
882  * avoid deadlocks. Some operations update multiple VMs at once. Track
883  * all the reservation info in a context structure. Optionally a sync
884  * object can track VM updates.
885  */
886 struct bo_vm_reservation_context {
887         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
888         unsigned int n_vms;                 /* Number of VMs reserved       */
889         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
890         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
891         struct list_head list, duplicates;  /* BO lists                     */
892         struct amdgpu_sync *sync;           /* Pointer to sync object       */
893         bool reserved;                      /* Whether BOs are reserved     */
894 };
895
896 enum bo_vm_match {
897         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
898         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
899         BO_VM_ALL,              /* Match all VMs a BO was added to    */
900 };
901
902 /**
903  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
904  * @mem: KFD BO structure.
905  * @vm: the VM to reserve.
906  * @ctx: the struct that will be used in unreserve_bo_and_vms().
907  */
908 static int reserve_bo_and_vm(struct kgd_mem *mem,
909                               struct amdgpu_vm *vm,
910                               struct bo_vm_reservation_context *ctx)
911 {
912         struct amdgpu_bo *bo = mem->bo;
913         int ret;
914
915         WARN_ON(!vm);
916
917         ctx->reserved = false;
918         ctx->n_vms = 1;
919         ctx->sync = &mem->sync;
920
921         INIT_LIST_HEAD(&ctx->list);
922         INIT_LIST_HEAD(&ctx->duplicates);
923
924         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
925         if (!ctx->vm_pd)
926                 return -ENOMEM;
927
928         ctx->kfd_bo.priority = 0;
929         ctx->kfd_bo.tv.bo = &bo->tbo;
930         ctx->kfd_bo.tv.num_shared = 1;
931         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
932
933         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
934
935         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
936                                      false, &ctx->duplicates);
937         if (ret) {
938                 pr_err("Failed to reserve buffers in ttm.\n");
939                 kfree(ctx->vm_pd);
940                 ctx->vm_pd = NULL;
941                 return ret;
942         }
943
944         ctx->reserved = true;
945         return 0;
946 }
947
948 /**
949  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
950  * @mem: KFD BO structure.
951  * @vm: the VM to reserve. If NULL, all VMs associated with the BO that
952  * match @map_type are reserved. Otherwise, only the given VM is reserved.
953  * @map_type: the mapping status that will be used to filter the VMs.
954  * @ctx: the struct that will be used in unreserve_bo_and_vms().
955  *
956  * Returns 0 for success, negative for failure.
957  */
958 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
959                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
960                                 struct bo_vm_reservation_context *ctx)
961 {
962         struct amdgpu_bo *bo = mem->bo;
963         struct kfd_mem_attachment *entry;
964         unsigned int i;
965         int ret;
966
967         ctx->reserved = false;
968         ctx->n_vms = 0;
969         ctx->vm_pd = NULL;
970         ctx->sync = &mem->sync;
971
972         INIT_LIST_HEAD(&ctx->list);
973         INIT_LIST_HEAD(&ctx->duplicates);
974
975         list_for_each_entry(entry, &mem->attachments, list) {
976                 if ((vm && vm != entry->bo_va->base.vm) ||
977                         (entry->is_mapped != map_type
978                         && map_type != BO_VM_ALL))
979                         continue;
980
981                 ctx->n_vms++;
982         }
983
984         if (ctx->n_vms != 0) {
985                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
986                                      GFP_KERNEL);
987                 if (!ctx->vm_pd)
988                         return -ENOMEM;
989         }
990
991         ctx->kfd_bo.priority = 0;
992         ctx->kfd_bo.tv.bo = &bo->tbo;
993         ctx->kfd_bo.tv.num_shared = 1;
994         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
995
996         i = 0;
997         list_for_each_entry(entry, &mem->attachments, list) {
998                 if ((vm && vm != entry->bo_va->base.vm) ||
999                         (entry->is_mapped != map_type
1000                         && map_type != BO_VM_ALL))
1001                         continue;
1002
1003                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
1004                                 &ctx->vm_pd[i]);
1005                 i++;
1006         }
1007
1008         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
1009                                      false, &ctx->duplicates);
1010         if (ret) {
1011                 pr_err("Failed to reserve buffers in ttm.\n");
1012                 kfree(ctx->vm_pd);
1013                 ctx->vm_pd = NULL;
1014                 return ret;
1015         }
1016
1017         ctx->reserved = true;
1018         return 0;
1019 }
1020
1021 /**
1022  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1023  * @ctx: Reservation context to unreserve
1024  * @wait: Optionally wait for a sync object representing pending VM updates
1025  * @intr: Whether the wait is interruptible
1026  *
1027  * Also frees any resources allocated in
1028  * reserve_bo_and_(cond_)vm(s). Returns the status from
1029  * amdgpu_sync_wait.
1030  */
1031 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1032                                  bool wait, bool intr)
1033 {
1034         int ret = 0;
1035
1036         if (wait)
1037                 ret = amdgpu_sync_wait(ctx->sync, intr);
1038
1039         if (ctx->reserved)
1040                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
1041         kfree(ctx->vm_pd);
1042
1043         ctx->sync = NULL;
1044
1045         ctx->reserved = false;
1046         ctx->vm_pd = NULL;
1047
1048         return ret;
1049 }
1050
1051 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1052                                 struct kfd_mem_attachment *entry,
1053                                 struct amdgpu_sync *sync)
1054 {
1055         struct amdgpu_bo_va *bo_va = entry->bo_va;
1056         struct amdgpu_device *adev = entry->adev;
1057         struct amdgpu_vm *vm = bo_va->base.vm;
1058
1059         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1060
1061         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1062
1063         amdgpu_sync_fence(sync, bo_va->last_pt_update);
1064
1065         kfd_mem_dmaunmap_attachment(mem, entry);
1066 }
1067
1068 static int update_gpuvm_pte(struct kgd_mem *mem,
1069                             struct kfd_mem_attachment *entry,
1070                             struct amdgpu_sync *sync,
1071                             bool *table_freed)
1072 {
1073         struct amdgpu_bo_va *bo_va = entry->bo_va;
1074         struct amdgpu_device *adev = entry->adev;
1075         int ret;
1076
1077         ret = kfd_mem_dmamap_attachment(mem, entry);
1078         if (ret)
1079                 return ret;
1080
1081         /* Update the page tables  */
1082         ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
1083         if (ret) {
1084                 pr_err("amdgpu_vm_bo_update failed\n");
1085                 return ret;
1086         }
1087
1088         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1089 }
1090
1091 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1092                            struct kfd_mem_attachment *entry,
1093                            struct amdgpu_sync *sync,
1094                            bool no_update_pte,
1095                            bool *table_freed)
1096 {
1097         int ret;
1098
1099         /* Set virtual address for the allocation */
1100         ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1101                                amdgpu_bo_size(entry->bo_va->base.bo),
1102                                entry->pte_flags);
1103         if (ret) {
1104                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1105                                 entry->va, ret);
1106                 return ret;
1107         }
1108
1109         if (no_update_pte)
1110                 return 0;
1111
1112         ret = update_gpuvm_pte(mem, entry, sync, table_freed);
1113         if (ret) {
1114                 pr_err("update_gpuvm_pte() failed\n");
1115                 goto update_gpuvm_pte_failed;
1116         }
1117
1118         return 0;
1119
1120 update_gpuvm_pte_failed:
1121         unmap_bo_from_gpuvm(mem, entry, sync);
1122         return ret;
1123 }
1124
1125 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
1126 {
1127         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
1128
1129         if (!sg)
1130                 return NULL;
1131         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
1132                 kfree(sg);
1133                 return NULL;
1134         }
1135         sg->sgl->dma_address = addr;
1136         sg->sgl->length = size;
1137 #ifdef CONFIG_NEED_SG_DMA_LENGTH
1138         sg->sgl->dma_length = size;
1139 #endif
1140         return sg;
1141 }
1142
1143 static int process_validate_vms(struct amdkfd_process_info *process_info)
1144 {
1145         struct amdgpu_vm *peer_vm;
1146         int ret;
1147
1148         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1149                             vm_list_node) {
1150                 ret = vm_validate_pt_pd_bos(peer_vm);
1151                 if (ret)
1152                         return ret;
1153         }
1154
1155         return 0;
1156 }
1157
1158 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1159                                  struct amdgpu_sync *sync)
1160 {
1161         struct amdgpu_vm *peer_vm;
1162         int ret;
1163
1164         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1165                             vm_list_node) {
1166                 struct amdgpu_bo *pd = peer_vm->root.bo;
1167
1168                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1169                                        AMDGPU_SYNC_NE_OWNER,
1170                                        AMDGPU_FENCE_OWNER_KFD);
1171                 if (ret)
1172                         return ret;
1173         }
1174
1175         return 0;
1176 }
1177
1178 static int process_update_pds(struct amdkfd_process_info *process_info,
1179                               struct amdgpu_sync *sync)
1180 {
1181         struct amdgpu_vm *peer_vm;
1182         int ret;
1183
1184         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1185                             vm_list_node) {
1186                 ret = vm_update_pds(peer_vm, sync);
1187                 if (ret)
1188                         return ret;
1189         }
1190
1191         return 0;
1192 }
1193
1194 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1195                        struct dma_fence **ef)
1196 {
1197         struct amdkfd_process_info *info = NULL;
1198         int ret;
1199
1200         if (!*process_info) {
1201                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1202                 if (!info)
1203                         return -ENOMEM;
1204
1205                 mutex_init(&info->lock);
1206                 INIT_LIST_HEAD(&info->vm_list_head);
1207                 INIT_LIST_HEAD(&info->kfd_bo_list);
1208                 INIT_LIST_HEAD(&info->userptr_valid_list);
1209                 INIT_LIST_HEAD(&info->userptr_inval_list);
1210
1211                 info->eviction_fence =
1212                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1213                                                    current->mm,
1214                                                    NULL);
1215                 if (!info->eviction_fence) {
1216                         pr_err("Failed to create eviction fence\n");
1217                         ret = -ENOMEM;
1218                         goto create_evict_fence_fail;
1219                 }
1220
1221                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1222                 atomic_set(&info->evicted_bos, 0);
1223                 INIT_DELAYED_WORK(&info->restore_userptr_work,
1224                                   amdgpu_amdkfd_restore_userptr_worker);
1225
1226                 *process_info = info;
1227                 *ef = dma_fence_get(&info->eviction_fence->base);
1228         }
1229
1230         vm->process_info = *process_info;
1231
1232         /* Validate page directory and attach eviction fence */
1233         ret = amdgpu_bo_reserve(vm->root.bo, true);
1234         if (ret)
1235                 goto reserve_pd_fail;
1236         ret = vm_validate_pt_pd_bos(vm);
1237         if (ret) {
1238                 pr_err("validate_pt_pd_bos() failed\n");
1239                 goto validate_pd_fail;
1240         }
1241         ret = amdgpu_bo_sync_wait(vm->root.bo,
1242                                   AMDGPU_FENCE_OWNER_KFD, false);
1243         if (ret)
1244                 goto wait_pd_fail;
1245         ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1);
1246         if (ret)
1247                 goto reserve_shared_fail;
1248         amdgpu_bo_fence(vm->root.bo,
1249                         &vm->process_info->eviction_fence->base, true);
1250         amdgpu_bo_unreserve(vm->root.bo);
1251
1252         /* Update process info */
1253         mutex_lock(&vm->process_info->lock);
1254         list_add_tail(&vm->vm_list_node,
1255                         &(vm->process_info->vm_list_head));
1256         vm->process_info->n_vms++;
1257         mutex_unlock(&vm->process_info->lock);
1258
1259         return 0;
1260
1261 reserve_shared_fail:
1262 wait_pd_fail:
1263 validate_pd_fail:
1264         amdgpu_bo_unreserve(vm->root.bo);
1265 reserve_pd_fail:
1266         vm->process_info = NULL;
1267         if (info) {
1268                 /* Two fence references: one in info and one in *ef */
1269                 dma_fence_put(&info->eviction_fence->base);
1270                 dma_fence_put(*ef);
1271                 *ef = NULL;
1272                 *process_info = NULL;
1273                 put_pid(info->pid);
1274 create_evict_fence_fail:
1275                 mutex_destroy(&info->lock);
1276                 kfree(info);
1277         }
1278         return ret;
1279 }
1280
1281 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1282                                            struct file *filp, u32 pasid,
1283                                            void **process_info,
1284                                            struct dma_fence **ef)
1285 {
1286         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1287         struct amdgpu_fpriv *drv_priv;
1288         struct amdgpu_vm *avm;
1289         int ret;
1290
1291         ret = amdgpu_file_to_fpriv(filp, &drv_priv);
1292         if (ret)
1293                 return ret;
1294         avm = &drv_priv->vm;
1295
1296         /* Already a compute VM? */
1297         if (avm->process_info)
1298                 return -EINVAL;
1299
1300         /* Free the pasid originally allocated by amdgpu. It will be
1301          * replaced with a pasid allocated by kfd.
1302          */
1303         if (avm->pasid) {
1304                 amdgpu_pasid_free(avm->pasid);
1305                 amdgpu_vm_set_pasid(adev, avm, 0);
1306         }
1307
1308         /* Convert VM into a compute VM */
1309         ret = amdgpu_vm_make_compute(adev, avm);
1310         if (ret)
1311                 return ret;
1312
1313         ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1314         if (ret)
1315                 return ret;
1316         /* Initialize KFD part of the VM and process info */
1317         ret = init_kfd_vm(avm, process_info, ef);
1318         if (ret)
1319                 return ret;
1320
1321         amdgpu_vm_set_task_info(avm);
1322
1323         return 0;
1324 }
1325
1326 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1327                                     struct amdgpu_vm *vm)
1328 {
1329         struct amdkfd_process_info *process_info = vm->process_info;
1330         struct amdgpu_bo *pd = vm->root.bo;
1331
1332         if (!process_info)
1333                 return;
1334
1335         /* Release eviction fence from PD */
1336         amdgpu_bo_reserve(pd, false);
1337         amdgpu_bo_fence(pd, NULL, false);
1338         amdgpu_bo_unreserve(pd);
1339
1340         /* Update process info */
1341         mutex_lock(&process_info->lock);
1342         process_info->n_vms--;
1343         list_del(&vm->vm_list_node);
1344         mutex_unlock(&process_info->lock);
1345
1346         vm->process_info = NULL;
1347
1348         /* Release per-process resources when last compute VM is destroyed */
1349         if (!process_info->n_vms) {
1350                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1351                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1352                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1353
1354                 dma_fence_put(&process_info->eviction_fence->base);
1355                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1356                 put_pid(process_info->pid);
1357                 mutex_destroy(&process_info->lock);
1358                 kfree(process_info);
1359         }
1360 }
1361
1362 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv)
1363 {
1364         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1365         struct amdgpu_vm *avm;
1366
1367         if (WARN_ON(!kgd || !drm_priv))
1368                 return;
1369
1370         avm = drm_priv_to_vm(drm_priv);
1371
1372         pr_debug("Releasing process vm %p\n", avm);
1373
1374         /* The original pasid of the amdgpu vm was already released
1375          * when the vm was converted to a compute vm. The current
1376          * pasid is managed by kfd and will be released on kfd process
1377          * destruction. Set the amdgpu pasid to 0 to avoid a duplicate
1378          * release.
1379          */
1380         amdgpu_vm_release_compute(adev, avm);
1381 }
1382
1383 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1384 {
1385         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1386         struct amdgpu_bo *pd = avm->root.bo;
1387         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1388
1389         if (adev->asic_type < CHIP_VEGA10)
1390                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1391         return avm->pd_phys_addr;
1392 }
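/* Note: AMDGPU_GPU_PAGE_SHIFT is 12 (4 KiB GPU pages), so ASICs older than
 * Vega10 report the page directory address in units of GPU pages, while
 * Vega10 and later return the unshifted address.
 */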
1393
1394 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1395                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1396                 void *drm_priv, struct kgd_mem **mem,
1397                 uint64_t *offset, uint32_t flags)
1398 {
1399         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1400         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1401         enum ttm_bo_type bo_type = ttm_bo_type_device;
1402         struct sg_table *sg = NULL;
1403         uint64_t user_addr = 0;
1404         struct amdgpu_bo *bo;
1405         struct drm_gem_object *gobj;
1406         u32 domain, alloc_domain;
1407         u64 alloc_flags;
1408         int ret;
1409
1410         /*
1411          * Check which domain to allocate the BO in
1412          */
1413         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1414                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1415                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1416                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1417                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1418         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1419                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1420                 alloc_flags = 0;
1421         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1422                 domain = AMDGPU_GEM_DOMAIN_GTT;
1423                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1424                 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1425                 if (!offset || !*offset)
1426                         return -EINVAL;
1427                 user_addr = untagged_addr(*offset);
1428         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1429                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1430                 domain = AMDGPU_GEM_DOMAIN_GTT;
1431                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1432                 bo_type = ttm_bo_type_sg;
1433                 alloc_flags = 0;
1434                 if (size > UINT_MAX)
1435                         return -EINVAL;
1436                 sg = create_doorbell_sg(*offset, size);
1437                 if (!sg)
1438                         return -ENOMEM;
1439         } else {
1440                 return -EINVAL;
1441         }
1442
1443         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1444         if (!*mem) {
1445                 ret = -ENOMEM;
1446                 goto err;
1447         }
1448         INIT_LIST_HEAD(&(*mem)->attachments);
1449         mutex_init(&(*mem)->lock);
1450         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1451
1452         /* Workaround for AQL queue wraparound bug. Map the same
1453          * memory twice. That means we only actually allocate half
1454          * the memory.
1455          */
1456         if ((*mem)->aql_queue)
1457                 size = size >> 1;
1458
1459         (*mem)->alloc_flags = flags;
1460
1461         amdgpu_sync_create(&(*mem)->sync);
1462
1463         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1464         if (ret) {
1465                 pr_debug("Insufficient memory\n");
1466                 goto err_reserve_limit;
1467         }
1468
1469         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1470                         va, size, domain_string(alloc_domain));
1471
1472         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1473                                        bo_type, NULL, &gobj);
1474         if (ret) {
1475                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1476                          domain_string(alloc_domain), ret);
1477                 goto err_bo_create;
1478         }
1479         ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1480         if (ret) {
1481                 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1482                 goto err_node_allow;
1483         }
1484         bo = gem_to_amdgpu_bo(gobj);
1485         if (bo_type == ttm_bo_type_sg) {
1486                 bo->tbo.sg = sg;
1487                 bo->tbo.ttm->sg = sg;
1488         }
1489         bo->kfd_bo = *mem;
1490         (*mem)->bo = bo;
1491         if (user_addr)
1492                 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1493
1494         (*mem)->va = va;
1495         (*mem)->domain = domain;
1496         (*mem)->mapped_to_gpu_memory = 0;
1497         (*mem)->process_info = avm->process_info;
1498         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1499
1500         if (user_addr) {
1501                 ret = init_user_pages(*mem, user_addr);
1502                 if (ret)
1503                         goto allocate_init_user_pages_failed;
1504         }
1505
1506         if (offset)
1507                 *offset = amdgpu_bo_mmap_offset(bo);
1508
1509         return 0;
1510
1511 allocate_init_user_pages_failed:
1512         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1513         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1514 err_node_allow:
1515         drm_gem_object_put(gobj);
1516         /* Don't unreserve system mem limit twice */
1517         goto err_reserve_limit;
1518 err_bo_create:
1519         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1520 err_reserve_limit:
1521         mutex_destroy(&(*mem)->lock);
1522         kfree(*mem);
1523 err:
1524         if (sg) {
1525                 sg_free_table(sg);
1526                 kfree(sg);
1527         }
1528         return ret;
1529 }
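
/*
 * Illustrative call sequence from the KFD side (a sketch only, not the
 * actual ioctl code; local variable names are made up):
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset = 0;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, drm_priv,
 *			&mem, &offset, KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *			KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv,
 *				NULL);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, drm_priv, NULL);
 */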
1530
1531 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1532                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
1533                 uint64_t *size)
1534 {
1535         struct amdkfd_process_info *process_info = mem->process_info;
1536         unsigned long bo_size = mem->bo->tbo.base.size;
1537         struct kfd_mem_attachment *entry, *tmp;
1538         struct bo_vm_reservation_context ctx;
1539         struct ttm_validate_buffer *bo_list_entry;
1540         unsigned int mapped_to_gpu_memory;
1541         int ret;
1542         bool is_imported = false;
1543
1544         mutex_lock(&mem->lock);
1545         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1546         is_imported = mem->is_imported;
1547         mutex_unlock(&mem->lock);
1548         /* lock is not needed after this, since mem is unused and will
1549          * be freed anyway
1550          */
1551
1552         if (mapped_to_gpu_memory > 0) {
1553                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1554                                 mem->va, bo_size);
1555                 return -EBUSY;
1556         }
1557
1558         /* Make sure restore workers don't access the BO any more */
1559         bo_list_entry = &mem->validate_list;
1560         mutex_lock(&process_info->lock);
1561         list_del(&bo_list_entry->head);
1562         mutex_unlock(&process_info->lock);
1563
1564         /* No more MMU notifiers */
1565         amdgpu_mn_unregister(mem->bo);
1566
1567         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1568         if (unlikely(ret))
1569                 return ret;
1570
1571         /* The eviction fence should be removed by the last unmap.
1572          * TODO: Log an error condition if the bo still has the eviction fence
1573          * attached
1574          */
1575         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1576                                         process_info->eviction_fence);
1577         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1578                 mem->va + bo_size * (1 + mem->aql_queue));
1579
1580         /* Remove from VM internal data structures */
1581         list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
1582                 kfd_mem_detach(entry);
1583
1584         ret = unreserve_bo_and_vms(&ctx, false, false);
1585
1586         /* Free the sync object */
1587         amdgpu_sync_free(&mem->sync);
1588
1589         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1590          * remap BO. We need to free it.
1591          */
1592         if (mem->bo->tbo.sg) {
1593                 sg_free_table(mem->bo->tbo.sg);
1594                 kfree(mem->bo->tbo.sg);
1595         }
1596
1597         /* Update the size of the BO being freed if it was allocated from
1598          * VRAM and is not imported.
1599          */
1600         if (size) {
1601                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1602                     (!is_imported))
1603                         *size = bo_size;
1604                 else
1605                         *size = 0;
1606         }
1607
1608         /* Free the BO */
1609         drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1610         if (mem->dmabuf)
1611                 dma_buf_put(mem->dmabuf);
1612         mutex_destroy(&mem->lock);
1613
1614         /* If this releases the last reference, it will end up calling
1615          * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
1616          * this needs to be the last call here.
1617          */
1618         drm_gem_object_put(&mem->bo->tbo.base);
1619
1620         return ret;
1621 }
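
/* Note that amdgpu_amdkfd_gpuvm_free_memory_of_gpu() fails with -EBUSY
 * while the BO is still mapped on any GPU (mapped_to_gpu_memory > 0);
 * callers are expected to unmap the BO from all VMs before freeing it.
 */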
1622
1623 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1624                 struct kgd_dev *kgd, struct kgd_mem *mem,
1625                 void *drm_priv, bool *table_freed)
1626 {
1627         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1628         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1629         int ret;
1630         struct amdgpu_bo *bo;
1631         uint32_t domain;
1632         struct kfd_mem_attachment *entry;
1633         struct bo_vm_reservation_context ctx;
1634         unsigned long bo_size;
1635         bool is_invalid_userptr = false;
1636
1637         bo = mem->bo;
1638         if (!bo) {
1639                 pr_err("Invalid BO when mapping memory to GPU\n");
1640                 return -EINVAL;
1641         }
1642
1643         /* Make sure restore is not running concurrently. Since we
1644          * don't map invalid userptr BOs, we rely on the next restore
1645          * worker to do the mapping
1646          */
1647         mutex_lock(&mem->process_info->lock);
1648
1649         /* Take the mmap write lock. If we find an invalid userptr BO, we
1650          * can be sure that the MMU notifier is no longer running
1651          * concurrently and that the queues are actually stopped.
1652          */
1653         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1654                 mmap_write_lock(current->mm);
1655                 is_invalid_userptr = atomic_read(&mem->invalid);
1656                 mmap_write_unlock(current->mm);
1657         }
1658
1659         mutex_lock(&mem->lock);
1660
1661         domain = mem->domain;
1662         bo_size = bo->tbo.base.size;
1663
1664         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1665                         mem->va,
1666                         mem->va + bo_size * (1 + mem->aql_queue),
1667                         avm, domain_string(domain));
1668
1669         if (!kfd_mem_is_attached(avm, mem)) {
1670                 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
1671                 if (ret)
1672                         goto out;
1673         }
1674
1675         ret = reserve_bo_and_vm(mem, avm, &ctx);
1676         if (unlikely(ret))
1677                 goto out;
1678
1679         /* Userptr can be marked as "not invalid", but not actually be
1680          * validated yet (still in the system domain). In that case
1681          * the queues are still stopped and we can leave mapping for
1682          * the next restore worker
1683          */
1684         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1685             bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
1686                 is_invalid_userptr = true;
1687
1688         ret = vm_validate_pt_pd_bos(avm);
1689         if (unlikely(ret))
1690                 goto out_unreserve;
1691
1692         if (mem->mapped_to_gpu_memory == 0 &&
1693             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1694                 /* Validate BO only once. The eviction fence gets added to BO
1695                  * the first time it is mapped. Validate will wait for all
1696                  * background evictions to complete.
1697                  */
1698                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1699                 if (ret) {
1700                         pr_debug("Validate failed\n");
1701                         goto out_unreserve;
1702                 }
1703         }
1704
1705         list_for_each_entry(entry, &mem->attachments, list) {
1706                 if (entry->bo_va->base.vm != avm || entry->is_mapped)
1707                         continue;
1708
1709                 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1710                          entry->va, entry->va + bo_size, entry);
1711
1712                 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
1713                                       is_invalid_userptr, table_freed);
1714                 if (ret) {
1715                         pr_err("Failed to map bo to gpuvm\n");
1716                         goto out_unreserve;
1717                 }
1718
1719                 ret = vm_update_pds(avm, ctx.sync);
1720                 if (ret) {
1721                         pr_err("Failed to update page directories\n");
1722                         goto out_unreserve;
1723                 }
1724
1725                 entry->is_mapped = true;
1726                 mem->mapped_to_gpu_memory++;
1727                 pr_debug("\t INC mapping count %d\n",
1728                          mem->mapped_to_gpu_memory);
1729         }
1730
1731         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1732                 amdgpu_bo_fence(bo,
1733                                 &avm->process_info->eviction_fence->base,
1734                                 true);
1735         ret = unreserve_bo_and_vms(&ctx, false, false);
1736
1737         /* Apply the no-TLB-flush optimization only on Aldebaran; on all
1738          * other ASICs always report table_freed to work around regressions.
1739          */
1740         if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
1741                 *table_freed = true;
1742
1743         goto out;
1744
1745 out_unreserve:
1746         unreserve_bo_and_vms(&ctx, false, false);
1747 out:
1748         mutex_unlock(&mem->process_info->lock);
1749         mutex_unlock(&mem->lock);
1750         return ret;
1751 }
1752
1753 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1754                 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
1755 {
1756         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1757         struct amdkfd_process_info *process_info = avm->process_info;
1758         unsigned long bo_size = mem->bo->tbo.base.size;
1759         struct kfd_mem_attachment *entry;
1760         struct bo_vm_reservation_context ctx;
1761         int ret;
1762
1763         mutex_lock(&mem->lock);
1764
1765         ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
1766         if (unlikely(ret))
1767                 goto out;
1768         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1769         if (ctx.n_vms == 0) {
1770                 ret = -EINVAL;
1771                 goto unreserve_out;
1772         }
1773
1774         ret = vm_validate_pt_pd_bos(avm);
1775         if (unlikely(ret))
1776                 goto unreserve_out;
1777
1778         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1779                 mem->va,
1780                 mem->va + bo_size * (1 + mem->aql_queue),
1781                 avm);
1782
1783         list_for_each_entry(entry, &mem->attachments, list) {
1784                 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
1785                         continue;
1786
1787                 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1788                          entry->va, entry->va + bo_size, entry);
1789
1790                 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1791                 entry->is_mapped = false;
1792
1793                 mem->mapped_to_gpu_memory--;
1794                 pr_debug("\t DEC mapping count %d\n",
1795                          mem->mapped_to_gpu_memory);
1796         }
1797
1798         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1799          * required.
1800          */
1801         if (mem->mapped_to_gpu_memory == 0 &&
1802             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1803             !mem->bo->tbo.pin_count)
1804                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1805                                                 process_info->eviction_fence);
1806
1807 unreserve_out:
1808         unreserve_bo_and_vms(&ctx, false, false);
1809 out:
1810         mutex_unlock(&mem->lock);
1811         return ret;
1812 }
1813
1814 int amdgpu_amdkfd_gpuvm_sync_memory(
1815                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1816 {
1817         struct amdgpu_sync sync;
1818         int ret;
1819
1820         amdgpu_sync_create(&sync);
1821
1822         mutex_lock(&mem->lock);
1823         amdgpu_sync_clone(&mem->sync, &sync);
1824         mutex_unlock(&mem->lock);
1825
1826         ret = amdgpu_sync_wait(&sync, intr);
1827         amdgpu_sync_free(&sync);
1828         return ret;
1829 }
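
/* A sketch of the intended use: amdgpu_amdkfd_gpuvm_sync_memory() is
 * expected to be called after amdgpu_amdkfd_gpuvm_map_memory_to_gpu() so
 * that the page table updates collected in mem->sync have completed before
 * user mode queues access the mapping.
 */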
1830
1831 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1832                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1833 {
1834         int ret;
1835         struct amdgpu_bo *bo = mem->bo;
1836
1837         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1838                 pr_err("userptr can't be mapped to kernel\n");
1839                 return -EINVAL;
1840         }
1841
1842         /* Remove kgd_mem from the kfd_bo_list so this BO is not
1843          * re-validated when BOs are restored after an eviction.
1844          */
1845         mutex_lock(&mem->process_info->lock);
1846
1847         ret = amdgpu_bo_reserve(bo, true);
1848         if (ret) {
1849                 pr_err("Failed to reserve bo. ret %d\n", ret);
1850                 goto bo_reserve_failed;
1851         }
1852
1853         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1854         if (ret) {
1855                 pr_err("Failed to pin bo. ret %d\n", ret);
1856                 goto pin_failed;
1857         }
1858
1859         ret = amdgpu_bo_kmap(bo, kptr);
1860         if (ret) {
1861                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1862                 goto kmap_failed;
1863         }
1864
1865         amdgpu_amdkfd_remove_eviction_fence(
1866                 bo, mem->process_info->eviction_fence);
1867         list_del_init(&mem->validate_list.head);
1868
1869         if (size)
1870                 *size = amdgpu_bo_size(bo);
1871
1872         amdgpu_bo_unreserve(bo);
1873
1874         mutex_unlock(&mem->process_info->lock);
1875         return 0;
1876
1877 kmap_failed:
1878         amdgpu_bo_unpin(bo);
1879 pin_failed:
1880         amdgpu_bo_unreserve(bo);
1881 bo_reserve_failed:
1882         mutex_unlock(&mem->process_info->lock);
1883
1884         return ret;
1885 }
1886
1887 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
1888 {
1889         struct amdgpu_bo *bo = mem->bo;
1890
1891         amdgpu_bo_reserve(bo, true);
1892         amdgpu_bo_kunmap(bo);
1893         amdgpu_bo_unpin(bo);
1894         amdgpu_bo_unreserve(bo);
1895 }
1896
1897 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1898                                               struct kfd_vm_fault_info *mem)
1899 {
1900         struct amdgpu_device *adev;
1901
1902         adev = (struct amdgpu_device *)kgd;
1903         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1904                 *mem = *adev->gmc.vm_fault_info;
1905                 mb();
1906                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1907         }
1908         return 0;
1909 }
1910
1911 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1912                                       struct dma_buf *dma_buf,
1913                                       uint64_t va, void *drm_priv,
1914                                       struct kgd_mem **mem, uint64_t *size,
1915                                       uint64_t *mmap_offset)
1916 {
1917         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1918         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1919         struct drm_gem_object *obj;
1920         struct amdgpu_bo *bo;
1921         int ret;
1922
1923         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1924                 /* Can't handle non-graphics buffers */
1925                 return -EINVAL;
1926
1927         obj = dma_buf->priv;
1928         if (drm_to_adev(obj->dev) != adev)
1929                 /* Can't handle buffers from other devices */
1930                 return -EINVAL;
1931
1932         bo = gem_to_amdgpu_bo(obj);
1933         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1934                                     AMDGPU_GEM_DOMAIN_GTT)))
1935                 /* Only VRAM and GTT BOs are supported */
1936                 return -EINVAL;
1937
1938         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1939         if (!*mem)
1940                 return -ENOMEM;
1941
1942         ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
1943         if (ret) {
1944                 kfree(*mem);
1945                 return ret;
1946         }
1947
1948         if (size)
1949                 *size = amdgpu_bo_size(bo);
1950
1951         if (mmap_offset)
1952                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1953
1954         INIT_LIST_HEAD(&(*mem)->attachments);
1955         mutex_init(&(*mem)->lock);
1956
1957         (*mem)->alloc_flags =
1958                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1959                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1960                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1961                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1962
1963         drm_gem_object_get(&bo->tbo.base);
1964         (*mem)->bo = bo;
1965         (*mem)->va = va;
1966         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1967                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1968         (*mem)->mapped_to_gpu_memory = 0;
1969         (*mem)->process_info = avm->process_info;
1970         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1971         amdgpu_sync_create(&(*mem)->sync);
1972         (*mem)->is_imported = true;
1973
1974         return 0;
1975 }
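
/* Importing a dmabuf only registers the BO with the KFD process; it does
 * not map it. The caller still needs amdgpu_amdkfd_gpuvm_map_memory_to_gpu()
 * to make the buffer accessible in a GPU VM.
 */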
1976
1977 /* Evict a userptr BO by stopping the queues if necessary
1978  *
1979  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1980  * cannot do any memory allocations, and cannot take any locks that
1981  * are held elsewhere while allocating memory. Therefore this is as
1982  * simple as possible, using atomic counters.
1983  *
1984  * It doesn't do anything to the BO itself. The real work happens in
1985  * restore, where we get updated page addresses. This function only
1986  * ensures that GPU access to the BO is stopped.
1987  */
1988 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1989                                 struct mm_struct *mm)
1990 {
1991         struct amdkfd_process_info *process_info = mem->process_info;
1992         int evicted_bos;
1993         int r = 0;
1994
1995         atomic_inc(&mem->invalid);
1996         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1997         if (evicted_bos == 1) {
1998                 /* First eviction, stop the queues */
1999                 r = kgd2kfd_quiesce_mm(mm);
2000                 if (r)
2001                         pr_err("Failed to quiesce KFD\n");
2002                 schedule_delayed_work(&process_info->restore_userptr_work,
2003                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2004         }
2005
2006         return r;
2007 }
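
/* The counters bumped here are not decremented one by one: mem->invalid is
 * cleared per BO in update_invalid_user_pages(), and the restore worker
 * resets process_info->evicted_bos to zero with atomic_cmpxchg() once all
 * invalid userptr BOs have been revalidated.
 */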
2008
2009 /* Update invalid userptr BOs
2010  *
2011  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2012  * userptr_inval_list and updates user pages for all BOs that have
2013  * been invalidated since their last update.
2014  */
2015 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2016                                      struct mm_struct *mm)
2017 {
2018         struct kgd_mem *mem, *tmp_mem;
2019         struct amdgpu_bo *bo;
2020         struct ttm_operation_ctx ctx = { false, false };
2021         int invalid, ret;
2022
2023         /* Move all invalidated BOs to the userptr_inval_list and
2024          * release their user pages by migration to the CPU domain
2025          */
2026         list_for_each_entry_safe(mem, tmp_mem,
2027                                  &process_info->userptr_valid_list,
2028                                  validate_list.head) {
2029                 if (!atomic_read(&mem->invalid))
2030                         continue; /* BO is still valid */
2031
2032                 bo = mem->bo;
2033
2034                 if (amdgpu_bo_reserve(bo, true))
2035                         return -EAGAIN;
2036                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2037                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2038                 amdgpu_bo_unreserve(bo);
2039                 if (ret) {
2040                         pr_err("%s: Failed to invalidate userptr BO\n",
2041                                __func__);
2042                         return -EAGAIN;
2043                 }
2044
2045                 list_move_tail(&mem->validate_list.head,
2046                                &process_info->userptr_inval_list);
2047         }
2048
2049         if (list_empty(&process_info->userptr_inval_list))
2050                 return 0; /* All evicted userptr BOs were freed */
2051
2052         /* Go through userptr_inval_list and update any invalid user_pages */
2053         list_for_each_entry(mem, &process_info->userptr_inval_list,
2054                             validate_list.head) {
2055                 invalid = atomic_read(&mem->invalid);
2056                 if (!invalid)
2057                         /* BO hasn't been invalidated since the last
2058                          * revalidation attempt. Keep its BO list.
2059                          */
2060                         continue;
2061
2062                 bo = mem->bo;
2063
2064                 /* Get updated user pages */
2065                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
2066                 if (ret) {
2067                         pr_debug("Failed %d to get user pages\n", ret);
2068
2069                         /* Treat -EFAULT (bad address) as success. The access
2070                          * will fail later with a VM fault if the GPU tries to
2071                          * use the memory; better than hanging indefinitely
2072                          * with stalled user mode queues.
2073                          *
2074                          * Return other errors (-EBUSY, -ENOMEM) to retry restore.
2075                          */
2076                         if (ret != -EFAULT)
2077                                 return ret;
2078                 } else {
2079
2080                         /*
2081                          * FIXME: Cannot ignore the return code, must hold
2082                          * notifier_lock
2083                          */
2084                         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
2085                 }
2086
2087                 /* Mark the BO as valid unless it was invalidated
2088                  * again concurrently.
2089                  */
2090                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
2091                         return -EAGAIN;
2092         }
2093
2094         return 0;
2095 }
2096
2097 /* Validate invalid userptr BOs
2098  *
2099  * Validates BOs on the userptr_inval_list, and moves them back to the
2100  * userptr_valid_list. Also updates GPUVM page tables with new page
2101  * addresses and waits for the page table updates to complete.
2102  */
2103 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2104 {
2105         struct amdgpu_bo_list_entry *pd_bo_list_entries;
2106         struct list_head resv_list, duplicates;
2107         struct ww_acquire_ctx ticket;
2108         struct amdgpu_sync sync;
2109
2110         struct amdgpu_vm *peer_vm;
2111         struct kgd_mem *mem, *tmp_mem;
2112         struct amdgpu_bo *bo;
2113         struct ttm_operation_ctx ctx = { false, false };
2114         int i, ret;
2115
2116         pd_bo_list_entries = kcalloc(process_info->n_vms,
2117                                      sizeof(struct amdgpu_bo_list_entry),
2118                                      GFP_KERNEL);
2119         if (!pd_bo_list_entries) {
2120                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
2121                 ret = -ENOMEM;
2122                 goto out_no_mem;
2123         }
2124
2125         INIT_LIST_HEAD(&resv_list);
2126         INIT_LIST_HEAD(&duplicates);
2127
2128         /* Get all the page directory BOs that need to be reserved */
2129         i = 0;
2130         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2131                             vm_list_node)
2132                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
2133                                     &pd_bo_list_entries[i++]);
2134         /* Add the userptr_inval_list entries to resv_list */
2135         list_for_each_entry(mem, &process_info->userptr_inval_list,
2136                             validate_list.head) {
2137                 list_add_tail(&mem->resv_list.head, &resv_list);
2138                 mem->resv_list.bo = mem->validate_list.bo;
2139                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2140         }
2141
2142         /* Reserve all BOs and page tables for validation */
2143         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
2144         WARN(!list_empty(&duplicates), "Duplicates should be empty");
2145         if (ret)
2146                 goto out_free;
2147
2148         amdgpu_sync_create(&sync);
2149
2150         ret = process_validate_vms(process_info);
2151         if (ret)
2152                 goto unreserve_out;
2153
2154         /* Validate BOs and update GPUVM page tables */
2155         list_for_each_entry_safe(mem, tmp_mem,
2156                                  &process_info->userptr_inval_list,
2157                                  validate_list.head) {
2158                 struct kfd_mem_attachment *attachment;
2159
2160                 bo = mem->bo;
2161
2162                 /* Validate the BO if we got user pages */
2163                 if (bo->tbo.ttm->pages[0]) {
2164                         amdgpu_bo_placement_from_domain(bo, mem->domain);
2165                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2166                         if (ret) {
2167                                 pr_err("%s: failed to validate BO\n", __func__);
2168                                 goto unreserve_out;
2169                         }
2170                 }
2171
2172                 list_move_tail(&mem->validate_list.head,
2173                                &process_info->userptr_valid_list);
2174
2175                 /* Update mapping. If the BO was not validated
2176                  * (because we couldn't get user pages), this will
2177                  * clear the page table entries, which will result in
2178                  * VM faults if the GPU tries to access the invalid
2179                  * memory.
2180                  */
2181                 list_for_each_entry(attachment, &mem->attachments, list) {
2182                         if (!attachment->is_mapped)
2183                                 continue;
2184
2185                         kfd_mem_dmaunmap_attachment(mem, attachment);
2186                         ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
2187                         if (ret) {
2188                                 pr_err("%s: update PTE failed\n", __func__);
2189                                 /* make sure this gets validated again */
2190                                 atomic_inc(&mem->invalid);
2191                                 goto unreserve_out;
2192                         }
2193                 }
2194         }
2195
2196         /* Update page directories */
2197         ret = process_update_pds(process_info, &sync);
2198
2199 unreserve_out:
2200         ttm_eu_backoff_reservation(&ticket, &resv_list);
2201         amdgpu_sync_wait(&sync, false);
2202         amdgpu_sync_free(&sync);
2203 out_free:
2204         kfree(pd_bo_list_entries);
2205 out_no_mem:
2206
2207         return ret;
2208 }
2209
2210 /* Worker callback to restore evicted userptr BOs
2211  *
2212  * Tries to update and validate all userptr BOs. If successful and no
2213  * concurrent evictions happened, the queues are restarted. Otherwise,
2214  * reschedule for another attempt later.
2215  */
2216 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2217 {
2218         struct delayed_work *dwork = to_delayed_work(work);
2219         struct amdkfd_process_info *process_info =
2220                 container_of(dwork, struct amdkfd_process_info,
2221                              restore_userptr_work);
2222         struct task_struct *usertask;
2223         struct mm_struct *mm;
2224         int evicted_bos;
2225
2226         evicted_bos = atomic_read(&process_info->evicted_bos);
2227         if (!evicted_bos)
2228                 return;
2229
2230         /* Reference task and mm in case of concurrent process termination */
2231         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2232         if (!usertask)
2233                 return;
2234         mm = get_task_mm(usertask);
2235         if (!mm) {
2236                 put_task_struct(usertask);
2237                 return;
2238         }
2239
2240         mutex_lock(&process_info->lock);
2241
2242         if (update_invalid_user_pages(process_info, mm))
2243                 goto unlock_out;
2244         /* userptr_inval_list can be empty if all evicted userptr BOs
2245          * have been freed. In that case there is nothing to validate
2246          * and we can just restart the queues.
2247          */
2248         if (!list_empty(&process_info->userptr_inval_list)) {
2249                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2250                         goto unlock_out; /* Concurrent eviction, try again */
2251
2252                 if (validate_invalid_user_pages(process_info))
2253                         goto unlock_out;
2254         }
2255         /* Final check for concurrent eviction and atomic update. If
2256          * another eviction happens after successful update, it will
2257          * be a first eviction that calls quiesce_mm. The eviction
2258          * reference counting inside KFD will handle this case.
2259          */
2260         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2261             evicted_bos)
2262                 goto unlock_out;
2263         evicted_bos = 0;
2264         if (kgd2kfd_resume_mm(mm)) {
2265                 pr_err("%s: Failed to resume KFD\n", __func__);
2266                 /* No recovery from this failure. Probably the CP is
2267                  * hanging. No point trying again.
2268                  */
2269         }
2270
2271 unlock_out:
2272         mutex_unlock(&process_info->lock);
2273         mmput(mm);
2274         put_task_struct(usertask);
2275
2276         /* If validation failed, reschedule another attempt */
2277         if (evicted_bos)
2278                 schedule_delayed_work(&process_info->restore_userptr_work,
2279                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2280 }
2281
2282 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2283  *   KFD process identified by process_info
2284  *
2285  * @process_info: amdkfd_process_info of the KFD process
2286  *
2287  * After memory eviction, the restore thread calls this function. It must
2288  * be called while the process is still valid. BO restore involves:
2289  *
2290  * 1.  Release the old eviction fence and create a new one
2291  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2292  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2293  *     BOs that need to be reserved.
2294  * 4.  Reserve all the BOs
2295  * 5.  Validate PD and PT BOs
2296  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add a new fence
2297  * 7.  Add the fence to all PD and PT BOs
2298  * 8.  Unreserve all BOs
2299  */
2300 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2301 {
2302         struct amdgpu_bo_list_entry *pd_bo_list;
2303         struct amdkfd_process_info *process_info = info;
2304         struct amdgpu_vm *peer_vm;
2305         struct kgd_mem *mem;
2306         struct bo_vm_reservation_context ctx;
2307         struct amdgpu_amdkfd_fence *new_fence;
2308         int ret = 0, i;
2309         struct list_head duplicate_save;
2310         struct amdgpu_sync sync_obj;
2311         unsigned long failed_size = 0;
2312         unsigned long total_size = 0;
2313
2314         INIT_LIST_HEAD(&duplicate_save);
2315         INIT_LIST_HEAD(&ctx.list);
2316         INIT_LIST_HEAD(&ctx.duplicates);
2317
2318         pd_bo_list = kcalloc(process_info->n_vms,
2319                              sizeof(struct amdgpu_bo_list_entry),
2320                              GFP_KERNEL);
2321         if (!pd_bo_list)
2322                 return -ENOMEM;
2323
2324         i = 0;
2325         mutex_lock(&process_info->lock);
2326         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2327                         vm_list_node)
2328                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2329
2330         /* Reserve all BOs and page tables/directory. Add all BOs from
2331          * kfd_bo_list to ctx.list
2332          */
2333         list_for_each_entry(mem, &process_info->kfd_bo_list,
2334                             validate_list.head) {
2335
2336                 list_add_tail(&mem->resv_list.head, &ctx.list);
2337                 mem->resv_list.bo = mem->validate_list.bo;
2338                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2339         }
2340
2341         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2342                                      false, &duplicate_save);
2343         if (ret) {
2344                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2345                 goto ttm_reserve_fail;
2346         }
2347
2348         amdgpu_sync_create(&sync_obj);
2349
2350         /* Validate PDs and PTs */
2351         ret = process_validate_vms(process_info);
2352         if (ret)
2353                 goto validate_map_fail;
2354
2355         ret = process_sync_pds_resv(process_info, &sync_obj);
2356         if (ret) {
2357                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2358                 goto validate_map_fail;
2359         }
2360
2361         /* Validate BOs and map them to GPUVM (update VM page tables). */
2362         list_for_each_entry(mem, &process_info->kfd_bo_list,
2363                             validate_list.head) {
2364
2365                 struct amdgpu_bo *bo = mem->bo;
2366                 uint32_t domain = mem->domain;
2367                 struct kfd_mem_attachment *attachment;
2368
2369                 total_size += amdgpu_bo_size(bo);
2370
2371                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2372                 if (ret) {
2373                         pr_debug("Memory eviction: Validate BOs failed\n");
2374                         failed_size += amdgpu_bo_size(bo);
2375                         ret = amdgpu_amdkfd_bo_validate(bo,
2376                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2377                         if (ret) {
2378                                 pr_debug("Memory eviction: Try again\n");
2379                                 goto validate_map_fail;
2380                         }
2381                 }
2382                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2383                 if (ret) {
2384                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2385                         goto validate_map_fail;
2386                 }
2387                 list_for_each_entry(attachment, &mem->attachments, list) {
2388                         if (!attachment->is_mapped)
2389                                 continue;
2390
2391                         kfd_mem_dmaunmap_attachment(mem, attachment);
2392                         ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
2393                         if (ret) {
2394                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2395                                 goto validate_map_fail;
2396                         }
2397                 }
2398         }
2399
2400         if (failed_size)
2401                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2402
2403         /* Update page directories */
2404         ret = process_update_pds(process_info, &sync_obj);
2405         if (ret) {
2406                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2407                 goto validate_map_fail;
2408         }
2409
2410         /* Wait for validate and PT updates to finish */
2411         amdgpu_sync_wait(&sync_obj, false);
2412
2413         /* Release the old eviction fence and create a new one: a fence
2414          * only goes from unsignaled to signaled, so it cannot be reused.
2415          * Use the context and mm from the old fence.
2416          */
2417         new_fence = amdgpu_amdkfd_fence_create(
2418                                 process_info->eviction_fence->base.context,
2419                                 process_info->eviction_fence->mm,
2420                                 NULL);
2421         if (!new_fence) {
2422                 pr_err("Failed to create eviction fence\n");
2423                 ret = -ENOMEM;
2424                 goto validate_map_fail;
2425         }
2426         dma_fence_put(&process_info->eviction_fence->base);
2427         process_info->eviction_fence = new_fence;
2428         *ef = dma_fence_get(&new_fence->base);
2429
2430         /* Attach new eviction fence to all BOs */
2431         list_for_each_entry(mem, &process_info->kfd_bo_list,
2432                 validate_list.head)
2433                 amdgpu_bo_fence(mem->bo,
2434                         &process_info->eviction_fence->base, true);
2435
2436         /* Attach eviction fence to PD / PT BOs */
2437         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2438                             vm_list_node) {
2439                 struct amdgpu_bo *bo = peer_vm->root.bo;
2440
2441                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2442         }
2443
2444 validate_map_fail:
2445         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2446         amdgpu_sync_free(&sync_obj);
2447 ttm_reserve_fail:
2448         mutex_unlock(&process_info->lock);
2449         kfree(pd_bo_list);
2450         return ret;
2451 }
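
/* The new eviction fence is returned through @ef with an extra reference;
 * the caller (presumably the KFD restore worker) is expected to replace its
 * reference to the old fence with this one for the next eviction cycle.
 */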
2452
2453 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2454 {
2455         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2456         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2457         int ret;
2458
2459         if (!info || !gws)
2460                 return -EINVAL;
2461
2462         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2463         if (!*mem)
2464                 return -ENOMEM;
2465
2466         mutex_init(&(*mem)->lock);
2467         INIT_LIST_HEAD(&(*mem)->attachments);
2468         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2469         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2470         (*mem)->process_info = process_info;
2471         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2472         amdgpu_sync_create(&(*mem)->sync);
2473
2474
2475         /* Validate gws bo the first time it is added to process */
2476         mutex_lock(&(*mem)->process_info->lock);
2477         ret = amdgpu_bo_reserve(gws_bo, false);
2478         if (unlikely(ret)) {
2479                 pr_err("Reserve gws bo failed %d\n", ret);
2480                 goto bo_reservation_failure;
2481         }
2482
2483         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2484         if (ret) {
2485                 pr_err("GWS BO validate failed %d\n", ret);
2486                 goto bo_validation_failure;
2487         }
2488         /* The GWS resource is shared between amdgpu and amdkfd.
2489          * Add the process eviction fence to the BO so they can
2490          * evict each other.
2491          */
2492         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2493         if (ret)
2494                 goto reserve_shared_fail;
2495         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2496         amdgpu_bo_unreserve(gws_bo);
2497         mutex_unlock(&(*mem)->process_info->lock);
2498
2499         return ret;
2500
2501 reserve_shared_fail:
2502 bo_validation_failure:
2503         amdgpu_bo_unreserve(gws_bo);
2504 bo_reservation_failure:
2505         mutex_unlock(&(*mem)->process_info->lock);
2506         amdgpu_sync_free(&(*mem)->sync);
2507         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2508         amdgpu_bo_unref(&gws_bo);
2509         mutex_destroy(&(*mem)->lock);
2510         kfree(*mem);
2511         *mem = NULL;
2512         return ret;
2513 }
2514
2515 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2516 {
2517         int ret;
2518         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2519         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2520         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2521
2522         /* Remove BO from process's validate list so restore worker won't touch
2523          * it anymore
2524          */
2525         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2526
2527         ret = amdgpu_bo_reserve(gws_bo, false);
2528         if (unlikely(ret)) {
2529                 pr_err("Reserve gws bo failed %d\n", ret);
2530                 /* TODO: add BO back to validate_list? */
2531                 return ret;
2532         }
2533         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2534                         process_info->eviction_fence);
2535         amdgpu_bo_unreserve(gws_bo);
2536         amdgpu_sync_free(&kgd_mem->sync);
2537         amdgpu_bo_unref(&gws_bo);
2538         mutex_destroy(&kgd_mem->lock);
2539         kfree(mem);
2540         return 0;
2541 }
2542
2543 /* Returns GPU-specific tiling mode information */
2544 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2545                                 struct tile_config *config)
2546 {
2547         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2548
2549         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2550         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2551         config->num_tile_configs =
2552                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2553         config->macro_tile_config_ptr =
2554                         adev->gfx.config.macrotile_mode_array;
2555         config->num_macro_tile_configs =
2556                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2557
2558         /* Those values are not set from GFX9 onwards */
2559         config->num_banks = adev->gfx.config.num_banks;
2560         config->num_ranks = adev->gfx.config.num_ranks;
2561
2562         return 0;
2563 }