[linux.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_amdkfd_gpuvm.c
1 /*
2  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 #include <linux/dma-buf.h>
23 #include <linux/list.h>
24 #include <linux/pagemap.h>
25 #include <linux/sched/mm.h>
26 #include <linux/sched/task.h>
27
28 #include "amdgpu_object.h"
29 #include "amdgpu_gem.h"
30 #include "amdgpu_vm.h"
31 #include "amdgpu_amdkfd.h"
32 #include "amdgpu_dma_buf.h"
33 #include <uapi/linux/kfd_ioctl.h>
34 #include "amdgpu_xgmi.h"
35
36 /* BO flag to indicate a KFD userptr BO */
37 #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
38
39 /* Userptr restore delay, just long enough to allow consecutive VM
40  * changes to accumulate
41  */
42 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
43
44 /* Impose limit on how much memory KFD can use */
45 static struct {
46         uint64_t max_system_mem_limit;
47         uint64_t max_ttm_mem_limit;
48         int64_t system_mem_used;
49         int64_t ttm_mem_used;
50         spinlock_t mem_limit_lock;
51 } kfd_mem_limit;
52
53 /* Struct used for amdgpu_amdkfd_bo_validate */
54 struct amdgpu_vm_parser {
55         uint32_t        domain;
56         bool            wait;
57 };
58
59 static const char * const domain_bit_to_string[] = {
60                 "CPU",
61                 "GTT",
62                 "VRAM",
63                 "GDS",
64                 "GWS",
65                 "OA"
66 };
67
68 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
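/* Example (illustrative): AMDGPU_GEM_DOMAIN_VRAM is bit 2 (0x4), so
 * ffs() returns 3 and domain_string() evaluates to "VRAM".
 */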
69
70 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
71
72
73 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
74 {
75         return (struct amdgpu_device *)kgd;
76 }
77
78 static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
79                 struct kgd_mem *mem)
80 {
81         struct kfd_bo_va_list *entry;
82
83         list_for_each_entry(entry, &mem->bo_va_list, bo_list)
84                 if (entry->bo_va->base.vm == avm)
85                         return false;
86
87         return true;
88 }
89
90 /* Set memory usage limits. Currently, the limits are
91  *  System (TTM + userptr) memory - 15/16 of System RAM
92  *  TTM memory - 3/8 of System RAM
93  */
94 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
95 {
96         struct sysinfo si;
97         uint64_t mem;
98
99         si_meminfo(&si);
100         mem = si.freeram - si.freehigh;
101         mem *= si.mem_unit;
102
103         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
104         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 4);
105         kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
106         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
107                 (kfd_mem_limit.max_system_mem_limit >> 20),
108                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
109 }
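/*
 * Worked example (illustrative only): if si_meminfo() reports roughly
 * 64 GiB of free low memory at init time, the limits come out to
 *   max_system_mem_limit = 64 GiB - 4 GiB = 60 GiB  (15/16 of it)
 *   max_ttm_mem_limit    = 32 GiB - 8 GiB = 24 GiB  (3/8 of it)
 */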
110
111 /* Estimate page table size needed to represent a given memory size
112  *
113  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
114  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
115  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
116  * for 2MB pages for TLB efficiency. However, small allocations and
117  * fragmented system memory still need some 4KB pages. We choose a
118  * compromise that should work in most cases without reserving too
119  * much memory for page tables unnecessarily (factor 16K, >> 14).
120  */
121 #define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
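/*
 * Example (illustrative): ESTIMATE_PT_SIZE(64ULL << 30) reserves
 * 64 GiB >> 14 = 4 MiB for page tables, between the 128 MiB that pure
 * 4KB mappings would need (>> 9) and the 256 KiB that pure 2MB
 * mappings would need (>> 18).
 */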
122
123 static size_t amdgpu_amdkfd_acc_size(uint64_t size)
124 {
125         size >>= PAGE_SHIFT;
126         size *= sizeof(dma_addr_t) + sizeof(void *);
127
128         return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
129                 __roundup_pow_of_two(sizeof(struct ttm_tt)) +
130                 PAGE_ALIGN(size);
131 }
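/*
 * Rough illustration (assuming a 64-bit kernel): the per-page term is
 * sizeof(dma_addr_t) + sizeof(void *) = 16 bytes, i.e. about 16 KiB of
 * bookkeeping per 4 MiB of BO size, on top of the rounded-up sizes of
 * the amdgpu_bo and ttm_tt structures themselves.
 */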
132
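/*
 * How allocations are charged against the limits (summary of the code
 * below, not authoritative):
 *
 *   GTT BO:     size + acc_size against both the system and TTM limits
 *   userptr BO: size + acc_size against the system limit,
 *               acc_size only against the TTM limit
 *   VRAM/SG BO: acc_size against both limits; VRAM BOs additionally
 *               charge their size against adev->kfd.vram_used
 */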
133 static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
134                 uint64_t size, u32 domain, bool sg)
135 {
136         uint64_t reserved_for_pt =
137                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
138         size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
139         int ret = 0;
140
141         acc_size = amdgpu_amdkfd_acc_size(size);
142
143         vram_needed = 0;
144         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
145                 /* TTM GTT memory */
146                 system_mem_needed = acc_size + size;
147                 ttm_mem_needed = acc_size + size;
148         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
149                 /* Userptr */
150                 system_mem_needed = acc_size + size;
151                 ttm_mem_needed = acc_size;
152         } else {
153                 /* VRAM and SG */
154                 system_mem_needed = acc_size;
155                 ttm_mem_needed = acc_size;
156                 if (domain == AMDGPU_GEM_DOMAIN_VRAM)
157                         vram_needed = size;
158         }
159
160         spin_lock(&kfd_mem_limit.mem_limit_lock);
161
162         if (kfd_mem_limit.system_mem_used + system_mem_needed >
163             kfd_mem_limit.max_system_mem_limit)
164                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
165
166         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
167              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
168             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
169              kfd_mem_limit.max_ttm_mem_limit) ||
170             (adev->kfd.vram_used + vram_needed >
171              adev->gmc.real_vram_size - reserved_for_pt)) {
172                 ret = -ENOMEM;
173         } else {
174                 kfd_mem_limit.system_mem_used += system_mem_needed;
175                 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
176                 adev->kfd.vram_used += vram_needed;
177         }
178
179         spin_unlock(&kfd_mem_limit.mem_limit_lock);
180         return ret;
181 }
182
183 static void unreserve_mem_limit(struct amdgpu_device *adev,
184                 uint64_t size, u32 domain, bool sg)
185 {
186         size_t acc_size;
187
188         acc_size = amdgpu_amdkfd_acc_size(size);
189
190         spin_lock(&kfd_mem_limit.mem_limit_lock);
191         if (domain == AMDGPU_GEM_DOMAIN_GTT) {
192                 kfd_mem_limit.system_mem_used -= (acc_size + size);
193                 kfd_mem_limit.ttm_mem_used -= (acc_size + size);
194         } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
195                 kfd_mem_limit.system_mem_used -= (acc_size + size);
196                 kfd_mem_limit.ttm_mem_used -= acc_size;
197         } else {
198                 kfd_mem_limit.system_mem_used -= acc_size;
199                 kfd_mem_limit.ttm_mem_used -= acc_size;
200                 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
201                         adev->kfd.vram_used -= size;
202                         WARN_ONCE(adev->kfd.vram_used < 0,
203                                   "kfd VRAM memory accounting unbalanced");
204                 }
205         }
206         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
207                   "kfd system memory accounting unbalanced");
208         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
209                   "kfd TTM memory accounting unbalanced");
210
211         spin_unlock(&kfd_mem_limit.mem_limit_lock);
212 }
213
214 void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
215 {
216         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
217         u32 domain = bo->preferred_domains;
218         bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
219
220         if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
221                 domain = AMDGPU_GEM_DOMAIN_CPU;
222                 sg = false;
223         }
224
225         unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
226 }
227
228
229 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
230  *  reservation object.
231  *
232  * @bo: [IN] Remove eviction fence(s) from this BO
233  * @ef: [IN] This eviction fence is removed if it
234  *  is present in the shared list.
235  *
236  * NOTE: Must be called with BO reserved i.e. bo->tbo.base.resv lock held.
237  */
238 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
239                                         struct amdgpu_amdkfd_fence *ef)
240 {
241         struct dma_resv *resv = bo->tbo.base.resv;
242         struct dma_resv_list *old, *new;
243         unsigned int i, j, k;
244
245         if (!ef)
246                 return -EINVAL;
247
248         old = dma_resv_get_list(resv);
249         if (!old)
250                 return 0;
251
252         new = kmalloc(struct_size(new, shared, old->shared_max), GFP_KERNEL);
253         if (!new)
254                 return -ENOMEM;
255
256         /* Go through all the shared fences in the reservation object and sort
257          * the interesting ones to the end of the list.
258          */
259         for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
260                 struct dma_fence *f;
261
262                 f = rcu_dereference_protected(old->shared[i],
263                                               dma_resv_held(resv));
264
265                 if (f->context == ef->base.context)
266                         RCU_INIT_POINTER(new->shared[--j], f);
267                 else
268                         RCU_INIT_POINTER(new->shared[k++], f);
269         }
270         new->shared_max = old->shared_max;
271         new->shared_count = k;
272
273         /* Install the new fence list, seqcount provides the barriers */
274         write_seqcount_begin(&resv->seq);
275         RCU_INIT_POINTER(resv->fence, new);
276         write_seqcount_end(&resv->seq);
277
278         /* Drop the references to the removed fences */
279         for (i = j, k = 0; i < old->shared_count; ++i) {
280                 struct dma_fence *f;
281
282                 f = rcu_dereference_protected(new->shared[i],
283                                               dma_resv_held(resv));
284                 dma_fence_put(f);
285         }
286         kfree_rcu(old, rcu);
287
288         return 0;
289 }
290
291 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
292 {
293         struct amdgpu_bo *root = bo;
294         struct amdgpu_vm_bo_base *vm_bo;
295         struct amdgpu_vm *vm;
296         struct amdkfd_process_info *info;
297         struct amdgpu_amdkfd_fence *ef;
298         int ret;
299
300         /* We can always get vm_bo from the root PD BO. */
301         while (root->parent)
302                 root = root->parent;
303
304         vm_bo = root->vm_bo;
305         if (!vm_bo)
306                 return 0;
307
308         vm = vm_bo->vm;
309         if (!vm)
310                 return 0;
311
312         info = vm->process_info;
313         if (!info || !info->eviction_fence)
314                 return 0;
315
316         ef = container_of(dma_fence_get(&info->eviction_fence->base),
317                         struct amdgpu_amdkfd_fence, base);
318
319         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
320         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
321         dma_resv_unlock(bo->tbo.base.resv);
322
323         dma_fence_put(&ef->base);
324         return ret;
325 }
326
327 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
328                                      bool wait)
329 {
330         struct ttm_operation_ctx ctx = { false, false };
331         int ret;
332
333         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
334                  "Called with userptr BO"))
335                 return -EINVAL;
336
337         amdgpu_bo_placement_from_domain(bo, domain);
338
339         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
340         if (ret)
341                 goto validate_fail;
342         if (wait)
343                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
344
345 validate_fail:
346         return ret;
347 }
348
349 static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
350 {
351         struct amdgpu_vm_parser *p = param;
352
353         return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
354 }
355
356 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
357  *
358  * Page directories are not updated here because huge page handling
359  * during page table updates can invalidate page directory entries
360  * again. Page directories are only updated after updating page
361  * tables.
362  */
363 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
364 {
365         struct amdgpu_bo *pd = vm->root.base.bo;
366         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
367         struct amdgpu_vm_parser param;
368         int ret;
369
370         param.domain = AMDGPU_GEM_DOMAIN_VRAM;
371         param.wait = false;
372
373         ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
374                                         &param);
375         if (ret) {
376                 pr_err("failed to validate PT BOs\n");
377                 return ret;
378         }
379
380         ret = amdgpu_amdkfd_validate(&param, pd);
381         if (ret) {
382                 pr_err("failed to validate PD\n");
383                 return ret;
384         }
385
386         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
387
388         if (vm->use_cpu_for_update) {
389                 ret = amdgpu_bo_kmap(pd, NULL);
390                 if (ret) {
391                         pr_err("failed to kmap PD, ret=%d\n", ret);
392                         return ret;
393                 }
394         }
395
396         return 0;
397 }
398
399 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
400 {
401         struct amdgpu_bo *pd = vm->root.base.bo;
402         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
403         int ret;
404
405         ret = amdgpu_vm_update_pdes(adev, vm, false);
406         if (ret)
407                 return ret;
408
409         return amdgpu_sync_fence(sync, vm->last_update);
410 }
411
412 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
413 {
414         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
415         bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
416         bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
417         uint32_t mapping_flags;
418         uint64_t pte_flags;
419         bool snoop = false;
420
421         mapping_flags = AMDGPU_VM_PAGE_READABLE;
422         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
423                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
424         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
425                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
426
427         switch (adev->asic_type) {
428         case CHIP_ARCTURUS:
429                 if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
430                         if (bo_adev == adev)
431                                 mapping_flags |= coherent ?
432                                         AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
433                         else
434                                 mapping_flags |= AMDGPU_VM_MTYPE_UC;
435                 } else {
436                         mapping_flags |= coherent ?
437                                 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
438                 }
439                 break;
440         case CHIP_ALDEBARAN:
441                 if (coherent && uncached) {
442                         if (adev->gmc.xgmi.connected_to_cpu ||
443                                 !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
444                                 snoop = true;
445                         mapping_flags |= AMDGPU_VM_MTYPE_UC;
446                 } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
447                         if (bo_adev == adev) {
448                                 mapping_flags |= AMDGPU_VM_MTYPE_RW;
449                                 if (adev->gmc.xgmi.connected_to_cpu)
450                                         snoop = true;
451                         } else {
452                                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
453                                 if (amdgpu_xgmi_same_hive(adev, bo_adev))
454                                         snoop = true;
455                         }
456                 } else {
457                         snoop = true;
458                         if (adev->gmc.xgmi.connected_to_cpu)
459                                 /* system memory uses NC on A+A */
460                                 mapping_flags |= AMDGPU_VM_MTYPE_NC;
461                         else
462                                 mapping_flags |= coherent ?
463                                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
464                 }
465                 break;
466         default:
467                 mapping_flags |= coherent ?
468                         AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
469         }
470
471         pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
472         pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
473
474         return pte_flags;
475 }
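/*
 * Example (default ASIC branch above): a readable, writable,
 * non-coherent allocation ends up with AMDGPU_VM_PAGE_READABLE |
 * AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_MTYPE_NC, which
 * amdgpu_gem_va_map_flags() then turns into the ASIC-specific PTE
 * bits; AMDGPU_PTE_SNOOPED is only ORed in when snoop was set.
 */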
476
477 /* add_bo_to_vm - Add a BO to a VM
478  *
479  * Everything that needs to be done only once when a BO is first added
480  * to a VM. It can later be mapped and unmapped many times without
481  * repeating these steps.
482  *
483  * 1. Allocate and initialize BO VA entry data structure
484  * 2. Add BO to the VM
485  * 3. Determine ASIC-specific PTE flags
486  * 4. Alloc page tables and directories if needed
487  * 4a.  Validate new page tables and directories
488  */
489 static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
490                 struct amdgpu_vm *vm, bool is_aql,
491                 struct kfd_bo_va_list **p_bo_va_entry)
492 {
493         int ret;
494         struct kfd_bo_va_list *bo_va_entry;
495         struct amdgpu_bo *bo = mem->bo;
496         uint64_t va = mem->va;
497         struct list_head *list_bo_va = &mem->bo_va_list;
498         unsigned long bo_size = bo->tbo.base.size;
499
500         if (!va) {
501                 pr_err("Invalid VA when adding BO to VM\n");
502                 return -EINVAL;
503         }
504
505         if (is_aql)
506                 va += bo_size;
507
508         bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
509         if (!bo_va_entry)
510                 return -ENOMEM;
511
512         pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
513                         va + bo_size, vm);
514
515         /* Add BO to VM internal data structures */
516         bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
517         if (!bo_va_entry->bo_va) {
518                 ret = -EINVAL;
519                 pr_err("Failed to add BO object to VM. ret == %d\n",
520                                 ret);
521                 goto err_vmadd;
522         }
523
524         bo_va_entry->va = va;
525         bo_va_entry->pte_flags = get_pte_flags(adev, mem);
526         bo_va_entry->kgd_dev = (void *)adev;
527         list_add(&bo_va_entry->bo_list, list_bo_va);
528
529         if (p_bo_va_entry)
530                 *p_bo_va_entry = bo_va_entry;
531
532         /* Allocate and validate page tables if needed */
533         ret = vm_validate_pt_pd_bos(vm);
534         if (ret) {
535                 pr_err("validate_pt_pd_bos() failed\n");
536                 goto err_alloc_pts;
537         }
538
539         return 0;
540
541 err_alloc_pts:
542         amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
543         list_del(&bo_va_entry->bo_list);
544 err_vmadd:
545         kfree(bo_va_entry);
546         return ret;
547 }
548
549 static void remove_bo_from_vm(struct amdgpu_device *adev,
550                 struct kfd_bo_va_list *entry, unsigned long size)
551 {
552         pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
553                         entry->va,
554                         entry->va + size, entry);
555         amdgpu_vm_bo_rmv(adev, entry->bo_va);
556         list_del(&entry->bo_list);
557         kfree(entry);
558 }
559
560 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
561                                 struct amdkfd_process_info *process_info,
562                                 bool userptr)
563 {
564         struct ttm_validate_buffer *entry = &mem->validate_list;
565         struct amdgpu_bo *bo = mem->bo;
566
567         INIT_LIST_HEAD(&entry->head);
568         entry->num_shared = 1;
569         entry->bo = &bo->tbo;
570         mutex_lock(&process_info->lock);
571         if (userptr)
572                 list_add_tail(&entry->head, &process_info->userptr_valid_list);
573         else
574                 list_add_tail(&entry->head, &process_info->kfd_bo_list);
575         mutex_unlock(&process_info->lock);
576 }
577
578 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
579                 struct amdkfd_process_info *process_info)
580 {
581         struct ttm_validate_buffer *bo_list_entry;
582
583         bo_list_entry = &mem->validate_list;
584         mutex_lock(&process_info->lock);
585         list_del(&bo_list_entry->head);
586         mutex_unlock(&process_info->lock);
587 }
588
589 /* Initializes user pages. It registers the MMU notifier and validates
590  * the userptr BO in the GTT domain.
591  *
592  * The BO must already be on the userptr_valid_list. Otherwise an
593  * eviction and restore may happen that leaves the new BO unmapped
594  * with the user mode queues running.
595  *
596  * Takes the process_info->lock to protect against concurrent restore
597  * workers.
598  *
599  * Returns 0 for success, negative errno for errors.
600  */
601 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr)
602 {
603         struct amdkfd_process_info *process_info = mem->process_info;
604         struct amdgpu_bo *bo = mem->bo;
605         struct ttm_operation_ctx ctx = { true, false };
606         int ret = 0;
607
608         mutex_lock(&process_info->lock);
609
610         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
611         if (ret) {
612                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
613                 goto out;
614         }
615
616         ret = amdgpu_mn_register(bo, user_addr);
617         if (ret) {
618                 pr_err("%s: Failed to register MMU notifier: %d\n",
619                        __func__, ret);
620                 goto out;
621         }
622
623         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
624         if (ret) {
625                 pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
626                 goto unregister_out;
627         }
628
629         ret = amdgpu_bo_reserve(bo, true);
630         if (ret) {
631                 pr_err("%s: Failed to reserve BO\n", __func__);
632                 goto release_out;
633         }
634         amdgpu_bo_placement_from_domain(bo, mem->domain);
635         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
636         if (ret)
637                 pr_err("%s: failed to validate BO\n", __func__);
638         amdgpu_bo_unreserve(bo);
639
640 release_out:
641         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
642 unregister_out:
643         if (ret)
644                 amdgpu_mn_unregister(bo);
645 out:
646         mutex_unlock(&process_info->lock);
647         return ret;
648 }
649
650 /* Reserving a BO and its page table BOs must happen atomically to
651  * avoid deadlocks. Some operations update multiple VMs at once. Track
652  * all the reservation info in a context structure. Optionally a sync
653  * object can track VM updates.
654  */
655 struct bo_vm_reservation_context {
656         struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
657         unsigned int n_vms;                 /* Number of VMs reserved       */
658         struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
659         struct ww_acquire_ctx ticket;       /* Reservation ticket           */
660         struct list_head list, duplicates;  /* BO lists                     */
661         struct amdgpu_sync *sync;           /* Pointer to sync object       */
662         bool reserved;                      /* Whether BOs are reserved     */
663 };
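/*
 * Typical usage, sketched from the callers further down in this file:
 *
 *	struct bo_vm_reservation_context ctx;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (unlikely(ret))
 *		return ret;
 *	... map/unmap and update page tables, adding fences to ctx.sync ...
 *	ret = unreserve_bo_and_vms(&ctx, false, false);
 */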
664
665 enum bo_vm_match {
666         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
667         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
668         BO_VM_ALL,              /* Match all VMs a BO was added to    */
669 };
670
671 /**
672  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
673  * @mem: KFD BO structure.
674  * @vm: the VM to reserve.
675  * @ctx: the struct that will be used in unreserve_bo_and_vms().
676  */
677 static int reserve_bo_and_vm(struct kgd_mem *mem,
678                               struct amdgpu_vm *vm,
679                               struct bo_vm_reservation_context *ctx)
680 {
681         struct amdgpu_bo *bo = mem->bo;
682         int ret;
683
684         WARN_ON(!vm);
685
686         ctx->reserved = false;
687         ctx->n_vms = 1;
688         ctx->sync = &mem->sync;
689
690         INIT_LIST_HEAD(&ctx->list);
691         INIT_LIST_HEAD(&ctx->duplicates);
692
693         ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
694         if (!ctx->vm_pd)
695                 return -ENOMEM;
696
697         ctx->kfd_bo.priority = 0;
698         ctx->kfd_bo.tv.bo = &bo->tbo;
699         ctx->kfd_bo.tv.num_shared = 1;
700         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
701
702         amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
703
704         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
705                                      false, &ctx->duplicates);
706         if (ret) {
707                 pr_err("Failed to reserve buffers in ttm.\n");
708                 kfree(ctx->vm_pd);
709                 ctx->vm_pd = NULL;
710                 return ret;
711         }
712
713         ctx->reserved = true;
714         return 0;
715 }
716
717 /**
718  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
719  * @mem: KFD BO structure.
720  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
721  * are used. Otherwise, only the given VM is used.
722  * @map_type: the mapping status that will be used to filter the VMs.
723  * @ctx: the struct that will be used in unreserve_bo_and_vms().
724  *
725  * Returns 0 for success, negative for failure.
726  */
727 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
728                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
729                                 struct bo_vm_reservation_context *ctx)
730 {
731         struct amdgpu_bo *bo = mem->bo;
732         struct kfd_bo_va_list *entry;
733         unsigned int i;
734         int ret;
735
736         ctx->reserved = false;
737         ctx->n_vms = 0;
738         ctx->vm_pd = NULL;
739         ctx->sync = &mem->sync;
740
741         INIT_LIST_HEAD(&ctx->list);
742         INIT_LIST_HEAD(&ctx->duplicates);
743
744         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
745                 if ((vm && vm != entry->bo_va->base.vm) ||
746                         (entry->is_mapped != map_type
747                         && map_type != BO_VM_ALL))
748                         continue;
749
750                 ctx->n_vms++;
751         }
752
753         if (ctx->n_vms != 0) {
754                 ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
755                                      GFP_KERNEL);
756                 if (!ctx->vm_pd)
757                         return -ENOMEM;
758         }
759
760         ctx->kfd_bo.priority = 0;
761         ctx->kfd_bo.tv.bo = &bo->tbo;
762         ctx->kfd_bo.tv.num_shared = 1;
763         list_add(&ctx->kfd_bo.tv.head, &ctx->list);
764
765         i = 0;
766         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
767                 if ((vm && vm != entry->bo_va->base.vm) ||
768                         (entry->is_mapped != map_type
769                         && map_type != BO_VM_ALL))
770                         continue;
771
772                 amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
773                                 &ctx->vm_pd[i]);
774                 i++;
775         }
776
777         ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
778                                      false, &ctx->duplicates);
779         if (ret) {
780                 pr_err("Failed to reserve buffers in ttm.\n");
781                 kfree(ctx->vm_pd);
782                 ctx->vm_pd = NULL;
783                 return ret;
784         }
785
786         ctx->reserved = true;
787         return 0;
788 }
789
790 /**
791  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
792  * @ctx: Reservation context to unreserve
793  * @wait: Optionally wait for a sync object representing pending VM updates
794  * @intr: Whether the wait is interruptible
795  *
796  * Also frees any resources allocated in
797  * reserve_bo_and_(cond_)vm(s). Returns the status from
798  * amdgpu_sync_wait.
799  */
800 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
801                                  bool wait, bool intr)
802 {
803         int ret = 0;
804
805         if (wait)
806                 ret = amdgpu_sync_wait(ctx->sync, intr);
807
808         if (ctx->reserved)
809                 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
810         kfree(ctx->vm_pd);
811
812         ctx->sync = NULL;
813
814         ctx->reserved = false;
815         ctx->vm_pd = NULL;
816
817         return ret;
818 }
819
820 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
821                                 struct kfd_bo_va_list *entry,
822                                 struct amdgpu_sync *sync)
823 {
824         struct amdgpu_bo_va *bo_va = entry->bo_va;
825         struct amdgpu_vm *vm = bo_va->base.vm;
826
827         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
828
829         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
830
831         amdgpu_sync_fence(sync, bo_va->last_pt_update);
832
833         return 0;
834 }
835
836 static int update_gpuvm_pte(struct amdgpu_device *adev,
837                 struct kfd_bo_va_list *entry,
838                 struct amdgpu_sync *sync)
839 {
840         int ret;
841         struct amdgpu_bo_va *bo_va = entry->bo_va;
842
843         /* Update the page tables  */
844         ret = amdgpu_vm_bo_update(adev, bo_va, false);
845         if (ret) {
846                 pr_err("amdgpu_vm_bo_update failed\n");
847                 return ret;
848         }
849
850         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
851 }
852
853 static int map_bo_to_gpuvm(struct amdgpu_device *adev,
854                 struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
855                 bool no_update_pte)
856 {
857         int ret;
858
859         /* Set virtual address for the allocation */
860         ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
861                                amdgpu_bo_size(entry->bo_va->base.bo),
862                                entry->pte_flags);
863         if (ret) {
864                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
865                                 entry->va, ret);
866                 return ret;
867         }
868
869         if (no_update_pte)
870                 return 0;
871
872         ret = update_gpuvm_pte(adev, entry, sync);
873         if (ret) {
874                 pr_err("update_gpuvm_pte() failed\n");
875                 goto update_gpuvm_pte_failed;
876         }
877
878         return 0;
879
880 update_gpuvm_pte_failed:
881         unmap_bo_from_gpuvm(adev, entry, sync);
882         return ret;
883 }
884
885 static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
886 {
887         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
888
889         if (!sg)
890                 return NULL;
891         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
892                 kfree(sg);
893                 return NULL;
894         }
895         sg->sgl->dma_address = addr;
896         sg->sgl->length = size;
897 #ifdef CONFIG_NEED_SG_DMA_LENGTH
898         sg->sgl->dma_length = size;
899 #endif
900         return sg;
901 }
902
903 static int process_validate_vms(struct amdkfd_process_info *process_info)
904 {
905         struct amdgpu_vm *peer_vm;
906         int ret;
907
908         list_for_each_entry(peer_vm, &process_info->vm_list_head,
909                             vm_list_node) {
910                 ret = vm_validate_pt_pd_bos(peer_vm);
911                 if (ret)
912                         return ret;
913         }
914
915         return 0;
916 }
917
918 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
919                                  struct amdgpu_sync *sync)
920 {
921         struct amdgpu_vm *peer_vm;
922         int ret;
923
924         list_for_each_entry(peer_vm, &process_info->vm_list_head,
925                             vm_list_node) {
926                 struct amdgpu_bo *pd = peer_vm->root.base.bo;
927
928                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
929                                        AMDGPU_SYNC_NE_OWNER,
930                                        AMDGPU_FENCE_OWNER_KFD);
931                 if (ret)
932                         return ret;
933         }
934
935         return 0;
936 }
937
938 static int process_update_pds(struct amdkfd_process_info *process_info,
939                               struct amdgpu_sync *sync)
940 {
941         struct amdgpu_vm *peer_vm;
942         int ret;
943
944         list_for_each_entry(peer_vm, &process_info->vm_list_head,
945                             vm_list_node) {
946                 ret = vm_update_pds(peer_vm, sync);
947                 if (ret)
948                         return ret;
949         }
950
951         return 0;
952 }
953
954 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
955                        struct dma_fence **ef)
956 {
957         struct amdkfd_process_info *info = NULL;
958         int ret;
959
960         if (!*process_info) {
961                 info = kzalloc(sizeof(*info), GFP_KERNEL);
962                 if (!info)
963                         return -ENOMEM;
964
965                 mutex_init(&info->lock);
966                 INIT_LIST_HEAD(&info->vm_list_head);
967                 INIT_LIST_HEAD(&info->kfd_bo_list);
968                 INIT_LIST_HEAD(&info->userptr_valid_list);
969                 INIT_LIST_HEAD(&info->userptr_inval_list);
970
971                 info->eviction_fence =
972                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
973                                                    current->mm);
974                 if (!info->eviction_fence) {
975                         pr_err("Failed to create eviction fence\n");
976                         ret = -ENOMEM;
977                         goto create_evict_fence_fail;
978                 }
979
980                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
981                 atomic_set(&info->evicted_bos, 0);
982                 INIT_DELAYED_WORK(&info->restore_userptr_work,
983                                   amdgpu_amdkfd_restore_userptr_worker);
984
985                 *process_info = info;
986                 *ef = dma_fence_get(&info->eviction_fence->base);
987         }
988
989         vm->process_info = *process_info;
990
991         /* Validate page directory and attach eviction fence */
992         ret = amdgpu_bo_reserve(vm->root.base.bo, true);
993         if (ret)
994                 goto reserve_pd_fail;
995         ret = vm_validate_pt_pd_bos(vm);
996         if (ret) {
997                 pr_err("validate_pt_pd_bos() failed\n");
998                 goto validate_pd_fail;
999         }
1000         ret = amdgpu_bo_sync_wait(vm->root.base.bo,
1001                                   AMDGPU_FENCE_OWNER_KFD, false);
1002         if (ret)
1003                 goto wait_pd_fail;
1004         ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
1005         if (ret)
1006                 goto reserve_shared_fail;
1007         amdgpu_bo_fence(vm->root.base.bo,
1008                         &vm->process_info->eviction_fence->base, true);
1009         amdgpu_bo_unreserve(vm->root.base.bo);
1010
1011         /* Update process info */
1012         mutex_lock(&vm->process_info->lock);
1013         list_add_tail(&vm->vm_list_node,
1014                         &(vm->process_info->vm_list_head));
1015         vm->process_info->n_vms++;
1016         mutex_unlock(&vm->process_info->lock);
1017
1018         return 0;
1019
1020 reserve_shared_fail:
1021 wait_pd_fail:
1022 validate_pd_fail:
1023         amdgpu_bo_unreserve(vm->root.base.bo);
1024 reserve_pd_fail:
1025         vm->process_info = NULL;
1026         if (info) {
1027                 /* Two fence references: one in info and one in *ef */
1028                 dma_fence_put(&info->eviction_fence->base);
1029                 dma_fence_put(*ef);
1030                 *ef = NULL;
1031                 *process_info = NULL;
1032                 put_pid(info->pid);
1033 create_evict_fence_fail:
1034                 mutex_destroy(&info->lock);
1035                 kfree(info);
1036         }
1037         return ret;
1038 }
1039
1040 int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
1041                                           void **vm, void **process_info,
1042                                           struct dma_fence **ef)
1043 {
1044         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1045         struct amdgpu_vm *new_vm;
1046         int ret;
1047
1048         new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
1049         if (!new_vm)
1050                 return -ENOMEM;
1051
1052         /* Initialize AMDGPU part of the VM */
1053         ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
1054         if (ret) {
1055                 pr_err("Failed init vm ret %d\n", ret);
1056                 goto amdgpu_vm_init_fail;
1057         }
1058
1059         /* Initialize KFD part of the VM and process info */
1060         ret = init_kfd_vm(new_vm, process_info, ef);
1061         if (ret)
1062                 goto init_kfd_vm_fail;
1063
1064         *vm = (void *) new_vm;
1065
1066         return 0;
1067
1068 init_kfd_vm_fail:
1069         amdgpu_vm_fini(adev, new_vm);
1070 amdgpu_vm_init_fail:
1071         kfree(new_vm);
1072         return ret;
1073 }
1074
1075 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
1076                                            struct file *filp, u32 pasid,
1077                                            void **vm, void **process_info,
1078                                            struct dma_fence **ef)
1079 {
1080         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1081         struct drm_file *drm_priv = filp->private_data;
1082         struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
1083         struct amdgpu_vm *avm = &drv_priv->vm;
1084         int ret;
1085
1086         /* Already a compute VM? */
1087         if (avm->process_info)
1088                 return -EINVAL;
1089
1090         /* Convert VM into a compute VM */
1091         ret = amdgpu_vm_make_compute(adev, avm, pasid);
1092         if (ret)
1093                 return ret;
1094
1095         /* Initialize KFD part of the VM and process info */
1096         ret = init_kfd_vm(avm, process_info, ef);
1097         if (ret)
1098                 return ret;
1099
1100         *vm = (void *)avm;
1101
1102         return 0;
1103 }
1104
1105 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1106                                     struct amdgpu_vm *vm)
1107 {
1108         struct amdkfd_process_info *process_info = vm->process_info;
1109         struct amdgpu_bo *pd = vm->root.base.bo;
1110
1111         if (!process_info)
1112                 return;
1113
1114         /* Release eviction fence from PD */
1115         amdgpu_bo_reserve(pd, false);
1116         amdgpu_bo_fence(pd, NULL, false);
1117         amdgpu_bo_unreserve(pd);
1118
1119         /* Update process info */
1120         mutex_lock(&process_info->lock);
1121         process_info->n_vms--;
1122         list_del(&vm->vm_list_node);
1123         mutex_unlock(&process_info->lock);
1124
1125         vm->process_info = NULL;
1126
1127         /* Release per-process resources when last compute VM is destroyed */
1128         if (!process_info->n_vms) {
1129                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1130                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1131                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1132
1133                 dma_fence_put(&process_info->eviction_fence->base);
1134                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1135                 put_pid(process_info->pid);
1136                 mutex_destroy(&process_info->lock);
1137                 kfree(process_info);
1138         }
1139 }
1140
1141 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
1142 {
1143         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1144         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1145
1146         if (WARN_ON(!kgd || !vm))
1147                 return;
1148
1149         pr_debug("Destroying process vm %p\n", vm);
1150
1151         /* Release the VM context */
1152         amdgpu_vm_fini(adev, avm);
1153         kfree(vm);
1154 }
1155
1156 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
1157 {
1158         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1159         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1160
1161         if (WARN_ON(!kgd || !vm))
1162                 return;
1163
1164         pr_debug("Releasing process vm %p\n", vm);
1165
1166         /* The original pasid of the amdgpu vm has already been
1167          * released while turning the amdgpu vm into a compute vm.
1168          * The current pasid is managed by KFD and will be
1169          * released on KFD process destruction. Set the amdgpu pasid
1170          * to 0 to avoid a duplicate release.
1171          */
1172         amdgpu_vm_release_compute(adev, avm);
1173 }
1174
1175 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
1176 {
1177         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1178         struct amdgpu_bo *pd = avm->root.base.bo;
1179         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1180
1181         if (adev->asic_type < CHIP_VEGA10)
1182                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1183         return avm->pd_phys_addr;
1184 }
1185
1186 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1187                 struct kgd_dev *kgd, uint64_t va, uint64_t size,
1188                 void *vm, struct kgd_mem **mem,
1189                 uint64_t *offset, uint32_t flags)
1190 {
1191         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1192         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1193         enum ttm_bo_type bo_type = ttm_bo_type_device;
1194         struct sg_table *sg = NULL;
1195         uint64_t user_addr = 0;
1196         struct amdgpu_bo *bo;
1197         struct drm_gem_object *gobj;
1198         u32 domain, alloc_domain;
1199         u64 alloc_flags;
1200         int ret;
1201
1202         /*
1203          * Check on which domain to allocate BO
1204          */
1205         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1206                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1207                 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1208                 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1209                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
1210                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
1211         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1212                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1213                 alloc_flags = 0;
1214         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1215                 domain = AMDGPU_GEM_DOMAIN_GTT;
1216                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1217                 alloc_flags = 0;
1218                 if (!offset || !*offset)
1219                         return -EINVAL;
1220                 user_addr = untagged_addr(*offset);
1221         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1222                         KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1223                 domain = AMDGPU_GEM_DOMAIN_GTT;
1224                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1225                 bo_type = ttm_bo_type_sg;
1226                 alloc_flags = 0;
1227                 if (size > UINT_MAX)
1228                         return -EINVAL;
1229                 sg = create_doorbell_sg(*offset, size);
1230                 if (!sg)
1231                         return -ENOMEM;
1232         } else {
1233                 return -EINVAL;
1234         }
1235
1236         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1237         if (!*mem) {
1238                 ret = -ENOMEM;
1239                 goto err;
1240         }
1241         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1242         mutex_init(&(*mem)->lock);
1243         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1244
1245         /* Workaround for AQL queue wraparound bug. Map the same
1246          * memory twice. That means we only actually allocate half
1247          * the memory.
1248          */
1249         if ((*mem)->aql_queue)
1250                 size = size >> 1;
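        /*
         * e.g. a 4 MiB AQL allocation is backed by a 2 MiB BO that is
         * later mapped at both va and va + 2 MiB (see the is_aql
         * handling in add_bo_to_vm()).
         */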
1251
1252         (*mem)->alloc_flags = flags;
1253
1254         amdgpu_sync_create(&(*mem)->sync);
1255
1256         ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
1257         if (ret) {
1258                 pr_debug("Insufficient memory\n");
1259                 goto err_reserve_limit;
1260         }
1261
1262         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
1263                         va, size, domain_string(alloc_domain));
1264
1265         ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
1266                                        bo_type, NULL, &gobj);
1267         if (ret) {
1268                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1269                          domain_string(alloc_domain), ret);
1270                 goto err_bo_create;
1271         }
1272         bo = gem_to_amdgpu_bo(gobj);
1273         if (bo_type == ttm_bo_type_sg) {
1274                 bo->tbo.sg = sg;
1275                 bo->tbo.ttm->sg = sg;
1276         }
1277         bo->kfd_bo = *mem;
1278         (*mem)->bo = bo;
1279         if (user_addr)
1280                 bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;
1281
1282         (*mem)->va = va;
1283         (*mem)->domain = domain;
1284         (*mem)->mapped_to_gpu_memory = 0;
1285         (*mem)->process_info = avm->process_info;
1286         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1287
1288         if (user_addr) {
1289                 ret = init_user_pages(*mem, user_addr);
1290                 if (ret)
1291                         goto allocate_init_user_pages_failed;
1292         }
1293
1294         if (offset)
1295                 *offset = amdgpu_bo_mmap_offset(bo);
1296
1297         return 0;
1298
1299 allocate_init_user_pages_failed:
1300         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1301         amdgpu_bo_unref(&bo);
1302         /* Don't unreserve system mem limit twice */
1303         goto err_reserve_limit;
1304 err_bo_create:
1305         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
1306 err_reserve_limit:
1307         mutex_destroy(&(*mem)->lock);
1308         kfree(*mem);
1309 err:
1310         if (sg) {
1311                 sg_free_table(sg);
1312                 kfree(sg);
1313         }
1314         return ret;
1315 }
1316
1317 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1318                 struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
1319 {
1320         struct amdkfd_process_info *process_info = mem->process_info;
1321         unsigned long bo_size = mem->bo->tbo.base.size;
1322         struct kfd_bo_va_list *entry, *tmp;
1323         struct bo_vm_reservation_context ctx;
1324         struct ttm_validate_buffer *bo_list_entry;
1325         unsigned int mapped_to_gpu_memory;
1326         int ret;
1327         bool is_imported = false;
1328
1329         mutex_lock(&mem->lock);
1330         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1331         is_imported = mem->is_imported;
1332         mutex_unlock(&mem->lock);
1333         /* lock is not needed after this, since mem is unused and will
1334          * be freed anyway
1335          */
1336
1337         if (mapped_to_gpu_memory > 0) {
1338                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1339                                 mem->va, bo_size);
1340                 return -EBUSY;
1341         }
1342
1343         /* Make sure restore workers don't access the BO any more */
1344         bo_list_entry = &mem->validate_list;
1345         mutex_lock(&process_info->lock);
1346         list_del(&bo_list_entry->head);
1347         mutex_unlock(&process_info->lock);
1348
1349         /* No more MMU notifiers */
1350         amdgpu_mn_unregister(mem->bo);
1351
1352         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1353         if (unlikely(ret))
1354                 return ret;
1355
1356         /* The eviction fence should be removed by the last unmap.
1357          * TODO: Log an error condition if the bo still has the eviction fence
1358          * attached
1359          */
1360         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1361                                         process_info->eviction_fence);
1362         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1363                 mem->va + bo_size * (1 + mem->aql_queue));
1364
1365         /* Remove from VM internal data structures */
1366         list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
1367                 remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
1368                                 entry, bo_size);
1369
1370         ret = unreserve_bo_and_vms(&ctx, false, false);
1371
1372         /* Free the sync object */
1373         amdgpu_sync_free(&mem->sync);
1374
1375         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1376          * remap BO. We need to free it.
1377          */
1378         if (mem->bo->tbo.sg) {
1379                 sg_free_table(mem->bo->tbo.sg);
1380                 kfree(mem->bo->tbo.sg);
1381         }
1382
1383         /* Update the size of the BO being freed if it was allocated from
1384          * VRAM and is not imported.
1385          */
1386         if (size) {
1387                 if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
1388                     (!is_imported))
1389                         *size = bo_size;
1390                 else
1391                         *size = 0;
1392         }
1393
1394         /* Free the BO */
1395         drm_gem_object_put(&mem->bo->tbo.base);
1396         mutex_destroy(&mem->lock);
1397         kfree(mem);
1398
1399         return ret;
1400 }
1401
1402 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
1403                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1404 {
1405         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1406         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1407         int ret;
1408         struct amdgpu_bo *bo;
1409         uint32_t domain;
1410         struct kfd_bo_va_list *entry;
1411         struct bo_vm_reservation_context ctx;
1412         struct kfd_bo_va_list *bo_va_entry = NULL;
1413         struct kfd_bo_va_list *bo_va_entry_aql = NULL;
1414         unsigned long bo_size;
1415         bool is_invalid_userptr = false;
1416
1417         bo = mem->bo;
1418         if (!bo) {
1419                 pr_err("Invalid BO when mapping memory to GPU\n");
1420                 return -EINVAL;
1421         }
1422
1423         /* Make sure restore is not running concurrently. Since we
1424          * don't map invalid userptr BOs, we rely on the next restore
1425          * worker to do the mapping
1426          */
1427         mutex_lock(&mem->process_info->lock);
1428
1429         /* Lock the mmap lock. If we find an invalid userptr BO, we can be
1430          * sure that the MMU notifier is no longer running
1431          * concurrently and the queues are actually stopped
1432          */
1433         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1434                 mmap_write_lock(current->mm);
1435                 is_invalid_userptr = atomic_read(&mem->invalid);
1436                 mmap_write_unlock(current->mm);
1437         }
1438
1439         mutex_lock(&mem->lock);
1440
1441         domain = mem->domain;
1442         bo_size = bo->tbo.base.size;
1443
1444         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
1445                         mem->va,
1446                         mem->va + bo_size * (1 + mem->aql_queue),
1447                         vm, domain_string(domain));
1448
1449         ret = reserve_bo_and_vm(mem, vm, &ctx);
1450         if (unlikely(ret))
1451                 goto out;
1452
1453         /* Userptr can be marked as "not invalid", but not actually be
1454          * validated yet (still in the system domain). In that case
1455          * the queues are still stopped and we can leave mapping for
1456          * the next restore worker
1457          */
1458         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
1459             bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
1460                 is_invalid_userptr = true;
1461
1462         if (check_if_add_bo_to_vm(avm, mem)) {
1463                 ret = add_bo_to_vm(adev, mem, avm, false,
1464                                 &bo_va_entry);
1465                 if (ret)
1466                         goto add_bo_to_vm_failed;
1467                 if (mem->aql_queue) {
1468                         ret = add_bo_to_vm(adev, mem, avm,
1469                                         true, &bo_va_entry_aql);
1470                         if (ret)
1471                                 goto add_bo_to_vm_failed_aql;
1472                 }
1473         } else {
1474                 ret = vm_validate_pt_pd_bos(avm);
1475                 if (unlikely(ret))
1476                         goto add_bo_to_vm_failed;
1477         }
1478
1479         if (mem->mapped_to_gpu_memory == 0 &&
1480             !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1481                 /* Validate BO only once. The eviction fence gets added to BO
1482                  * the first time it is mapped. Validate will wait for all
1483                  * background evictions to complete.
1484                  */
1485                 ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
1486                 if (ret) {
1487                         pr_debug("Validate failed\n");
1488                         goto map_bo_to_gpuvm_failed;
1489                 }
1490         }
1491
1492         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1493                 if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
1494                         pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
1495                                         entry->va, entry->va + bo_size,
1496                                         entry);
1497
1498                         ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1499                                               is_invalid_userptr);
1500                         if (ret) {
1501                                 pr_err("Failed to map bo to gpuvm\n");
1502                                 goto map_bo_to_gpuvm_failed;
1503                         }
1504
1505                         ret = vm_update_pds(vm, ctx.sync);
1506                         if (ret) {
1507                                 pr_err("Failed to update page directories\n");
1508                                 goto map_bo_to_gpuvm_failed;
1509                         }
1510
1511                         entry->is_mapped = true;
1512                         mem->mapped_to_gpu_memory++;
1513                         pr_debug("\t INC mapping count %d\n",
1514                                         mem->mapped_to_gpu_memory);
1515                 }
1516         }
1517
1518         if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
1519                 amdgpu_bo_fence(bo,
1520                                 &avm->process_info->eviction_fence->base,
1521                                 true);
1522         ret = unreserve_bo_and_vms(&ctx, false, false);
1523
1524         goto out;
1525
1526 map_bo_to_gpuvm_failed:
1527         if (bo_va_entry_aql)
1528                 remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
1529 add_bo_to_vm_failed_aql:
1530         if (bo_va_entry)
1531                 remove_bo_from_vm(adev, bo_va_entry, bo_size);
1532 add_bo_to_vm_failed:
1533         unreserve_bo_and_vms(&ctx, false, false);
1534 out:
1535         mutex_unlock(&mem->process_info->lock);
1536         mutex_unlock(&mem->lock);
1537         return ret;
1538 }
1539
1540 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1541                 struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
1542 {
1543         struct amdgpu_device *adev = get_amdgpu_device(kgd);
1544         struct amdkfd_process_info *process_info =
1545                 ((struct amdgpu_vm *)vm)->process_info;
1546         unsigned long bo_size = mem->bo->tbo.base.size;
1547         struct kfd_bo_va_list *entry;
1548         struct bo_vm_reservation_context ctx;
1549         int ret;
1550
1551         mutex_lock(&mem->lock);
1552
1553         ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
1554         if (unlikely(ret))
1555                 goto out;
1556         /* If no VMs were reserved, it means the BO wasn't actually mapped */
1557         if (ctx.n_vms == 0) {
1558                 ret = -EINVAL;
1559                 goto unreserve_out;
1560         }
1561
1562         ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
1563         if (unlikely(ret))
1564                 goto unreserve_out;
1565
1566         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
1567                 mem->va,
1568                 mem->va + bo_size * (1 + mem->aql_queue),
1569                 vm);
1570
1571         list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
1572                 if (entry->bo_va->base.vm == vm && entry->is_mapped) {
1573                         pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
1574                                         entry->va,
1575                                         entry->va + bo_size,
1576                                         entry);
1577
1578                         ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1579                         if (ret == 0) {
1580                                 entry->is_mapped = false;
1581                         } else {
1582                                 pr_err("failed to unmap VA 0x%llx\n",
1583                                                 mem->va);
1584                                 goto unreserve_out;
1585                         }
1586
1587                         mem->mapped_to_gpu_memory--;
1588                         pr_debug("\t DEC mapping count %d\n",
1589                                         mem->mapped_to_gpu_memory);
1590                 }
1591         }
1592
1593         /* If BO is unmapped from all VMs, unfence it. It can be evicted if
1594          * required.
1595          */
1596         if (mem->mapped_to_gpu_memory == 0 &&
1597             !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
1598             !mem->bo->tbo.pin_count)
1599                 amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1600                                                 process_info->eviction_fence);
1601
1602 unreserve_out:
1603         unreserve_bo_and_vms(&ctx, false, false);
1604 out:
1605         mutex_unlock(&mem->lock);
1606         return ret;
1607 }
1608
1609 int amdgpu_amdkfd_gpuvm_sync_memory(
1610                 struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
1611 {
1612         struct amdgpu_sync sync;
1613         int ret;
1614
1615         amdgpu_sync_create(&sync);
1616
1617         mutex_lock(&mem->lock);
1618         amdgpu_sync_clone(&mem->sync, &sync);
1619         mutex_unlock(&mem->lock);
1620
1621         ret = amdgpu_sync_wait(&sync, intr);
1622         amdgpu_sync_free(&sync);
1623         return ret;
1624 }
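
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical caller-side sequence that maps a kgd_mem into a process GPUVM
 * and then waits for the queued page-table updates to land. The
 * "example_" function name is hypothetical; the two calls are the
 * functions defined above.
 */
static int example_map_and_wait(struct kgd_dev *kgd, struct kgd_mem *mem,
                                void *vm)
{
        int ret;

        /* Queue the GPUVM mapping and page-table updates */
        ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
        if (ret)
                return ret;

        /* Block (interruptibly) until those updates have completed */
        return amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
}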
1625
1626 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
1627                 struct kgd_mem *mem, void **kptr, uint64_t *size)
1628 {
1629         int ret;
1630         struct amdgpu_bo *bo = mem->bo;
1631
1632         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
1633                 pr_err("userptr can't be mapped to kernel\n");
1634                 return -EINVAL;
1635         }
1636
1637         /* Remove this kgd_mem from the kfd_bo_list so the BO is not
1638          * re-validated when BOs are restored after an eviction.
1639          */
1640         mutex_lock(&mem->process_info->lock);
1641
1642         ret = amdgpu_bo_reserve(bo, true);
1643         if (ret) {
1644                 pr_err("Failed to reserve bo. ret %d\n", ret);
1645                 goto bo_reserve_failed;
1646         }
1647
1648         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
1649         if (ret) {
1650                 pr_err("Failed to pin bo. ret %d\n", ret);
1651                 goto pin_failed;
1652         }
1653
1654         ret = amdgpu_bo_kmap(bo, kptr);
1655         if (ret) {
1656                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
1657                 goto kmap_failed;
1658         }
1659
1660         amdgpu_amdkfd_remove_eviction_fence(
1661                 bo, mem->process_info->eviction_fence);
1662         list_del_init(&mem->validate_list.head);
1663
1664         if (size)
1665                 *size = amdgpu_bo_size(bo);
1666
1667         amdgpu_bo_unreserve(bo);
1668
1669         mutex_unlock(&mem->process_info->lock);
1670         return 0;
1671
1672 kmap_failed:
1673         amdgpu_bo_unpin(bo);
1674 pin_failed:
1675         amdgpu_bo_unreserve(bo);
1676 bo_reserve_failed:
1677         mutex_unlock(&mem->process_info->lock);
1678
1679         return ret;
1680 }
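
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * giving the kernel CPU access to a GTT BO. After the call above succeeds
 * the BO is pinned and removed from the KFD restore list, so the mapping
 * stays valid until the BO is freed. The "example_" name is hypothetical.
 */
static int example_cpu_clear_bo(struct kgd_dev *kgd, struct kgd_mem *mem)
{
        void *kptr;
        uint64_t size;
        int ret;

        ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kgd, mem, &kptr, &size);
        if (ret)
                return ret;

        /* kptr is now a valid kernel mapping of the whole BO */
        memset(kptr, 0, size);
        return 0;
}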
1681
1682 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
1683                                               struct kfd_vm_fault_info *mem)
1684 {
1685         struct amdgpu_device *adev;
1686
1687         adev = (struct amdgpu_device *)kgd;
1688         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
1689                 *mem = *adev->gmc.vm_fault_info;
1690                 mb();
1691                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
1692         }
1693         return 0;
1694 }
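
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * polling the most recent GPUVM fault. The copy above only happens while
 * vm_fault_info_updated is set, so repeated calls are cheap. The
 * "example_" name is hypothetical.
 */
static void example_poll_vm_fault(struct kgd_dev *kgd)
{
        struct kfd_vm_fault_info info = {};

        /* No-op unless the interrupt handler recorded a new fault */
        amdgpu_amdkfd_gpuvm_get_vm_fault_info(kgd, &info);
}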
1695
1696 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
1697                                       struct dma_buf *dma_buf,
1698                                       uint64_t va, void *vm,
1699                                       struct kgd_mem **mem, uint64_t *size,
1700                                       uint64_t *mmap_offset)
1701 {
1702         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
1703         struct drm_gem_object *obj;
1704         struct amdgpu_bo *bo;
1705         struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
1706
1707         if (dma_buf->ops != &amdgpu_dmabuf_ops)
1708                 /* Can't handle non-graphics buffers */
1709                 return -EINVAL;
1710
1711         obj = dma_buf->priv;
1712         if (drm_to_adev(obj->dev) != adev)
1713                 /* Can't handle buffers from other devices */
1714                 return -EINVAL;
1715
1716         bo = gem_to_amdgpu_bo(obj);
1717         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
1718                                     AMDGPU_GEM_DOMAIN_GTT)))
1719                 /* Only VRAM and GTT BOs are supported */
1720                 return -EINVAL;
1721
1722         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1723         if (!*mem)
1724                 return -ENOMEM;
1725
1726         if (size)
1727                 *size = amdgpu_bo_size(bo);
1728
1729         if (mmap_offset)
1730                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
1731
1732         INIT_LIST_HEAD(&(*mem)->bo_va_list);
1733         mutex_init(&(*mem)->lock);
1734
1735         (*mem)->alloc_flags =
1736                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1737                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
1738                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
1739                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1740
1741         drm_gem_object_get(&bo->tbo.base);
1742         (*mem)->bo = bo;
1743         (*mem)->va = va;
1744         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
1745                 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
1746         (*mem)->mapped_to_gpu_memory = 0;
1747         (*mem)->process_info = avm->process_info;
1748         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
1749         amdgpu_sync_create(&(*mem)->sync);
1750         (*mem)->is_imported = true;
1751
1752         return 0;
1753 }
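
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * importing an amdgpu dma-buf into KFD and mapping it at a caller-chosen
 * virtual address. The "example_" name is hypothetical; kgd, dmabuf, va
 * and vm are assumed to come from the KFD ioctl layer.
 */
static int example_import_and_map(struct kgd_dev *kgd, struct dma_buf *dmabuf,
                                  uint64_t va, void *vm)
{
        struct kgd_mem *mem;
        uint64_t size, mmap_offset;
        int ret;

        ret = amdgpu_amdkfd_gpuvm_import_dmabuf(kgd, dmabuf, va, vm, &mem,
                                                &size, &mmap_offset);
        if (ret)
                return ret;

        /* From here on the import behaves like any other kgd_mem */
        return amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
}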
1754
1755 /* Evict a userptr BO by stopping the queues if necessary
1756  *
1757  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
1758  * cannot do any memory allocations, and cannot take any locks that
1759  * are held elsewhere while allocating memory. Therefore this is as
1760  * simple as possible, using atomic counters.
1761  *
1762  * It doesn't do anything to the BO itself. The real work happens in
1763  * restore, where we get updated page addresses. This function only
1764  * ensures that GPU access to the BO is stopped.
1765  */
1766 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
1767                                 struct mm_struct *mm)
1768 {
1769         struct amdkfd_process_info *process_info = mem->process_info;
1770         int evicted_bos;
1771         int r = 0;
1772
1773         atomic_inc(&mem->invalid);
1774         evicted_bos = atomic_inc_return(&process_info->evicted_bos);
1775         if (evicted_bos == 1) {
1776                 /* First eviction, stop the queues */
1777                 r = kgd2kfd_quiesce_mm(mm);
1778                 if (r)
1779                         pr_err("Failed to quiesce KFD\n");
1780                 schedule_delayed_work(&process_info->restore_userptr_work,
1781                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
1782         }
1783
1784         return r;
1785 }
1786
1787 /* Update invalid userptr BOs
1788  *
1789  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
1790  * userptr_inval_list and updates user pages for all BOs that have
1791  * been invalidated since their last update.
1792  */
1793 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
1794                                      struct mm_struct *mm)
1795 {
1796         struct kgd_mem *mem, *tmp_mem;
1797         struct amdgpu_bo *bo;
1798         struct ttm_operation_ctx ctx = { false, false };
1799         int invalid, ret;
1800
1801         /* Move all invalidated BOs to the userptr_inval_list and
1802          * release their user pages by migration to the CPU domain
1803          */
1804         list_for_each_entry_safe(mem, tmp_mem,
1805                                  &process_info->userptr_valid_list,
1806                                  validate_list.head) {
1807                 if (!atomic_read(&mem->invalid))
1808                         continue; /* BO is still valid */
1809
1810                 bo = mem->bo;
1811
1812                 if (amdgpu_bo_reserve(bo, true))
1813                         return -EAGAIN;
1814                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
1815                 ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1816                 amdgpu_bo_unreserve(bo);
1817                 if (ret) {
1818                         pr_err("%s: Failed to invalidate userptr BO\n",
1819                                __func__);
1820                         return -EAGAIN;
1821                 }
1822
1823                 list_move_tail(&mem->validate_list.head,
1824                                &process_info->userptr_inval_list);
1825         }
1826
1827         if (list_empty(&process_info->userptr_inval_list))
1828                 return 0; /* All evicted userptr BOs were freed */
1829
1830         /* Go through userptr_inval_list and update any invalid user_pages */
1831         list_for_each_entry(mem, &process_info->userptr_inval_list,
1832                             validate_list.head) {
1833                 invalid = atomic_read(&mem->invalid);
1834                 if (!invalid)
1835                         /* BO hasn't been invalidated since the last
1836                          * revalidation attempt. Keep it on the userptr_inval_list.
1837                          */
1838                         continue;
1839
1840                 bo = mem->bo;
1841
1842                 /* Get updated user pages */
1843                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
1844                 if (ret) {
1845                         pr_debug("%s: Failed to get user pages: %d\n",
1846                                 __func__, ret);
1847
1848                         /* Return error -EBUSY or -ENOMEM, retry restore */
1849                         return ret;
1850                 }
1851
1852                 /*
1853                  * FIXME: Cannot ignore the return code, must hold
1854                  * notifier_lock
1855                  */
1856                 amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
1857
1858                 /* Mark the BO as valid unless it was invalidated
1859                  * again concurrently.
1860                  */
1861                 if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
1862                         return -EAGAIN;
1863         }
1864
1865         return 0;
1866 }
1867
1868 /* Validate invalid userptr BOs
1869  *
1870  * Validates BOs on the userptr_inval_list, and moves them back to the
1871  * userptr_valid_list. Also updates GPUVM page tables with new page
1872  * addresses and waits for the page table updates to complete.
1873  */
1874 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
1875 {
1876         struct amdgpu_bo_list_entry *pd_bo_list_entries;
1877         struct list_head resv_list, duplicates;
1878         struct ww_acquire_ctx ticket;
1879         struct amdgpu_sync sync;
1880
1881         struct amdgpu_vm *peer_vm;
1882         struct kgd_mem *mem, *tmp_mem;
1883         struct amdgpu_bo *bo;
1884         struct ttm_operation_ctx ctx = { false, false };
1885         int i, ret;
1886
1887         pd_bo_list_entries = kcalloc(process_info->n_vms,
1888                                      sizeof(struct amdgpu_bo_list_entry),
1889                                      GFP_KERNEL);
1890         if (!pd_bo_list_entries) {
1891                 pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
1892                 ret = -ENOMEM;
1893                 goto out_no_mem;
1894         }
1895
1896         INIT_LIST_HEAD(&resv_list);
1897         INIT_LIST_HEAD(&duplicates);
1898
1899         /* Get all the page directory BOs that need to be reserved */
1900         i = 0;
1901         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1902                             vm_list_node)
1903                 amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
1904                                     &pd_bo_list_entries[i++]);
1905         /* Add the userptr_inval_list entries to resv_list */
1906         list_for_each_entry(mem, &process_info->userptr_inval_list,
1907                             validate_list.head) {
1908                 list_add_tail(&mem->resv_list.head, &resv_list);
1909                 mem->resv_list.bo = mem->validate_list.bo;
1910                 mem->resv_list.num_shared = mem->validate_list.num_shared;
1911         }
1912
1913         /* Reserve all BOs and page tables for validation */
1914         ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
1915         WARN(!list_empty(&duplicates), "Duplicates should be empty");
1916         if (ret)
1917                 goto out_free;
1918
1919         amdgpu_sync_create(&sync);
1920
1921         ret = process_validate_vms(process_info);
1922         if (ret)
1923                 goto unreserve_out;
1924
1925         /* Validate BOs and update GPUVM page tables */
1926         list_for_each_entry_safe(mem, tmp_mem,
1927                                  &process_info->userptr_inval_list,
1928                                  validate_list.head) {
1929                 struct kfd_bo_va_list *bo_va_entry;
1930
1931                 bo = mem->bo;
1932
1933                 /* Validate the BO if we got user pages */
1934                 if (bo->tbo.ttm->pages[0]) {
1935                         amdgpu_bo_placement_from_domain(bo, mem->domain);
1936                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1937                         if (ret) {
1938                                 pr_err("%s: failed to validate BO\n", __func__);
1939                                 goto unreserve_out;
1940                         }
1941                 }
1942
1943                 list_move_tail(&mem->validate_list.head,
1944                                &process_info->userptr_valid_list);
1945
1946                 /* Update mapping. If the BO was not validated
1947                  * (because we couldn't get user pages), this will
1948                  * clear the page table entries, which will result in
1949                  * VM faults if the GPU tries to access the invalid
1950                  * memory.
1951                  */
1952                 list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
1953                         if (!bo_va_entry->is_mapped)
1954                                 continue;
1955
1956                         ret = update_gpuvm_pte((struct amdgpu_device *)
1957                                                bo_va_entry->kgd_dev,
1958                                                bo_va_entry, &sync);
1959                         if (ret) {
1960                                 pr_err("%s: update PTE failed\n", __func__);
1961                                 /* make sure this gets validated again */
1962                                 atomic_inc(&mem->invalid);
1963                                 goto unreserve_out;
1964                         }
1965                 }
1966         }
1967
1968         /* Update page directories */
1969         ret = process_update_pds(process_info, &sync);
1970
1971 unreserve_out:
1972         ttm_eu_backoff_reservation(&ticket, &resv_list);
1973         amdgpu_sync_wait(&sync, false);
1974         amdgpu_sync_free(&sync);
1975 out_free:
1976         kfree(pd_bo_list_entries);
1977 out_no_mem:
1978
1979         return ret;
1980 }
1981
1982 /* Worker callback to restore evicted userptr BOs
1983  *
1984  * Tries to update and validate all userptr BOs. If successful and no
1985  * concurrent evictions happened, the queues are restarted. Otherwise,
1986  * reschedule for another attempt later.
1987  */
1988 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
1989 {
1990         struct delayed_work *dwork = to_delayed_work(work);
1991         struct amdkfd_process_info *process_info =
1992                 container_of(dwork, struct amdkfd_process_info,
1993                              restore_userptr_work);
1994         struct task_struct *usertask;
1995         struct mm_struct *mm;
1996         int evicted_bos;
1997
1998         evicted_bos = atomic_read(&process_info->evicted_bos);
1999         if (!evicted_bos)
2000                 return;
2001
2002         /* Reference task and mm in case of concurrent process termination */
2003         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2004         if (!usertask)
2005                 return;
2006         mm = get_task_mm(usertask);
2007         if (!mm) {
2008                 put_task_struct(usertask);
2009                 return;
2010         }
2011
2012         mutex_lock(&process_info->lock);
2013
2014         if (update_invalid_user_pages(process_info, mm))
2015                 goto unlock_out;
2016         /* userptr_inval_list can be empty if all evicted userptr BOs
2017          * have been freed. In that case there is nothing to validate
2018          * and we can just restart the queues.
2019          */
2020         if (!list_empty(&process_info->userptr_inval_list)) {
2021                 if (atomic_read(&process_info->evicted_bos) != evicted_bos)
2022                         goto unlock_out; /* Concurrent eviction, try again */
2023
2024                 if (validate_invalid_user_pages(process_info))
2025                         goto unlock_out;
2026         }
2027         /* Final check for concurrent eviction and atomic update. If
2028          * another eviction happens after the successful update, it will
2029          * be a first eviction that calls quiesce_mm. The eviction
2030          * reference counting inside KFD will handle this case.
2031          */
2032         if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
2033             evicted_bos)
2034                 goto unlock_out;
2035         evicted_bos = 0;
2036         if (kgd2kfd_resume_mm(mm)) {
2037                 pr_err("%s: Failed to resume KFD\n", __func__);
2038                 /* No recovery from this failure. Probably the CP is
2039                  * hanging. No point trying again.
2040                  */
2041         }
2042
2043 unlock_out:
2044         mutex_unlock(&process_info->lock);
2045         mmput(mm);
2046         put_task_struct(usertask);
2047
2048         /* If validation failed, reschedule another attempt */
2049         if (evicted_bos)
2050                 schedule_delayed_work(&process_info->restore_userptr_work,
2051                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2052 }
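
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how the restore worker is wired up. In the real driver the
 * INIT_DELAYED_WORK happens once when the process info is created and the
 * schedule_delayed_work happens on every eviction (see
 * amdgpu_amdkfd_evict_userptr above); they are shown together here only
 * for illustration.
 */
static void example_wire_up_restore_worker(struct amdkfd_process_info *info)
{
        /* One-time setup of the deferred restore work item */
        INIT_DELAYED_WORK(&info->restore_userptr_work,
                          amdgpu_amdkfd_restore_userptr_worker);

        /* Each eviction then (re)arms it, batching consecutive evictions */
        schedule_delayed_work(&info->restore_userptr_work,
                        msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}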
2053
2054 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2055  *   KFD process identified by process_info
2056  *
2057  * @process_info: amdkfd_process_info of the KFD process
2058  *
2059  * After memory eviction, the restore thread calls this function. The function
2060  * should be called while the process is still valid. BO restore involves:
2061  *
2062  * 1.  Release the old eviction fence and create a new one
2063  * 2.  Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
2064  * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
2065  *     BOs that need to be reserved.
2066  * 4.  Reserve all the BOs
2067  * 5.  Validate PD and PT BOs.
2068  * 6.  Validate all KFD BOs using kfd_bo_list, map them, and add the new fence
2069  * 7.  Add the fence to all PD and PT BOs.
2070  * 8.  Unreserve all BOs
2071  */
2072 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
2073 {
2074         struct amdgpu_bo_list_entry *pd_bo_list;
2075         struct amdkfd_process_info *process_info = info;
2076         struct amdgpu_vm *peer_vm;
2077         struct kgd_mem *mem;
2078         struct bo_vm_reservation_context ctx;
2079         struct amdgpu_amdkfd_fence *new_fence;
2080         int ret = 0, i;
2081         struct list_head duplicate_save;
2082         struct amdgpu_sync sync_obj;
2083         unsigned long failed_size = 0;
2084         unsigned long total_size = 0;
2085
2086         INIT_LIST_HEAD(&duplicate_save);
2087         INIT_LIST_HEAD(&ctx.list);
2088         INIT_LIST_HEAD(&ctx.duplicates);
2089
2090         pd_bo_list = kcalloc(process_info->n_vms,
2091                              sizeof(struct amdgpu_bo_list_entry),
2092                              GFP_KERNEL);
2093         if (!pd_bo_list)
2094                 return -ENOMEM;
2095
2096         i = 0;
2097         mutex_lock(&process_info->lock);
2098         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2099                         vm_list_node)
2100                 amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);
2101
2102         /* Reserve all BOs and page tables/directory. Add all BOs from
2103          * kfd_bo_list to ctx.list
2104          */
2105         list_for_each_entry(mem, &process_info->kfd_bo_list,
2106                             validate_list.head) {
2107
2108                 list_add_tail(&mem->resv_list.head, &ctx.list);
2109                 mem->resv_list.bo = mem->validate_list.bo;
2110                 mem->resv_list.num_shared = mem->validate_list.num_shared;
2111         }
2112
2113         ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
2114                                      false, &duplicate_save);
2115         if (ret) {
2116                 pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
2117                 goto ttm_reserve_fail;
2118         }
2119
2120         amdgpu_sync_create(&sync_obj);
2121
2122         /* Validate PDs and PTs */
2123         ret = process_validate_vms(process_info);
2124         if (ret)
2125                 goto validate_map_fail;
2126
2127         ret = process_sync_pds_resv(process_info, &sync_obj);
2128         if (ret) {
2129                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
2130                 goto validate_map_fail;
2131         }
2132
2133         /* Validate BOs and map them to GPUVM (update VM page tables). */
2134         list_for_each_entry(mem, &process_info->kfd_bo_list,
2135                             validate_list.head) {
2136
2137                 struct amdgpu_bo *bo = mem->bo;
2138                 uint32_t domain = mem->domain;
2139                 struct kfd_bo_va_list *bo_va_entry;
2140
2141                 total_size += amdgpu_bo_size(bo);
2142
2143                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2144                 if (ret) {
2145                         pr_debug("Memory eviction: Validate BOs failed\n");
2146                         failed_size += amdgpu_bo_size(bo);
2147                         ret = amdgpu_amdkfd_bo_validate(bo,
2148                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2149                         if (ret) {
2150                                 pr_debug("Memory eviction: Try again\n");
2151                                 goto validate_map_fail;
2152                         }
2153                 }
2154                 ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
2155                 if (ret) {
2156                         pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2157                         goto validate_map_fail;
2158                 }
2159                 list_for_each_entry(bo_va_entry, &mem->bo_va_list,
2160                                     bo_list) {
2161                         ret = update_gpuvm_pte((struct amdgpu_device *)
2162                                               bo_va_entry->kgd_dev,
2163                                               bo_va_entry,
2164                                               &sync_obj);
2165                         if (ret) {
2166                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2167                                 goto validate_map_fail;
2168                         }
2169                 }
2170         }
2171
2172         if (failed_size)
2173                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2174
2175         /* Update page directories */
2176         ret = process_update_pds(process_info, &sync_obj);
2177         if (ret) {
2178                 pr_debug("Memory eviction: update PDs failed. Try again\n");
2179                 goto validate_map_fail;
2180         }
2181
2182         /* Wait for validate and PT updates to finish */
2183         amdgpu_sync_wait(&sync_obj, false);
2184
2185         /* Release the old eviction fence and create a new one: a fence only
2186          * goes from unsignaled to signaled, so it cannot be reused.
2187          * Use the context and mm from the old fence.
2188          */
2189         new_fence = amdgpu_amdkfd_fence_create(
2190                                 process_info->eviction_fence->base.context,
2191                                 process_info->eviction_fence->mm);
2192         if (!new_fence) {
2193                 pr_err("Failed to create eviction fence\n");
2194                 ret = -ENOMEM;
2195                 goto validate_map_fail;
2196         }
2197         dma_fence_put(&process_info->eviction_fence->base);
2198         process_info->eviction_fence = new_fence;
2199         *ef = dma_fence_get(&new_fence->base);
2200
2201         /* Attach new eviction fence to all BOs */
2202         list_for_each_entry(mem, &process_info->kfd_bo_list,
2203                 validate_list.head)
2204                 amdgpu_bo_fence(mem->bo,
2205                         &process_info->eviction_fence->base, true);
2206
2207         /* Attach eviction fence to PD / PT BOs */
2208         list_for_each_entry(peer_vm, &process_info->vm_list_head,
2209                             vm_list_node) {
2210                 struct amdgpu_bo *bo = peer_vm->root.base.bo;
2211
2212                 amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
2213         }
2214
2215 validate_map_fail:
2216         ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
2217         amdgpu_sync_free(&sync_obj);
2218 ttm_reserve_fail:
2219         mutex_unlock(&process_info->lock);
2220         kfree(pd_bo_list);
2221         return ret;
2222 }
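
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller drives the restore path. KFD's restore worker passes the
 * opaque process_info and keeps the returned eviction fence; the
 * "example_" name and the retry hint are hypothetical.
 */
static int example_restore_process(void *process_info, struct dma_fence **ef)
{
        int ret;

        /* Re-validates all KFD BOs, rebuilds mappings, installs a new fence */
        ret = amdgpu_amdkfd_gpuvm_restore_process_bos(process_info, ef);
        if (ret)
                pr_debug("Restore failed, caller should retry later\n");

        return ret;
}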
2223
2224 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
2225 {
2226         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2227         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
2228         int ret;
2229
2230         if (!info || !gws)
2231                 return -EINVAL;
2232
2233         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2234         if (!*mem)
2235                 return -ENOMEM;
2236
2237         mutex_init(&(*mem)->lock);
2238         INIT_LIST_HEAD(&(*mem)->bo_va_list);
2239         (*mem)->bo = amdgpu_bo_ref(gws_bo);
2240         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
2241         (*mem)->process_info = process_info;
2242         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
2243         amdgpu_sync_create(&(*mem)->sync);
2244
2245
2246         /* Validate the GWS BO the first time it is added to the process */
2247         mutex_lock(&(*mem)->process_info->lock);
2248         ret = amdgpu_bo_reserve(gws_bo, false);
2249         if (unlikely(ret)) {
2250                 pr_err("Reserve gws bo failed %d\n", ret);
2251                 goto bo_reservation_failure;
2252         }
2253
2254         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
2255         if (ret) {
2256                 pr_err("GWS BO validate failed %d\n", ret);
2257                 goto bo_validation_failure;
2258         }
2259         /* The GWS resource is shared between amdgpu and amdkfd.
2260          * Add the process eviction fence to the BO so they can
2261          * evict each other.
2262          */
2263         ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
2264         if (ret)
2265                 goto reserve_shared_fail;
2266         amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
2267         amdgpu_bo_unreserve(gws_bo);
2268         mutex_unlock(&(*mem)->process_info->lock);
2269
2270         return ret;
2271
2272 reserve_shared_fail:
2273 bo_validation_failure:
2274         amdgpu_bo_unreserve(gws_bo);
2275 bo_reservation_failure:
2276         mutex_unlock(&(*mem)->process_info->lock);
2277         amdgpu_sync_free(&(*mem)->sync);
2278         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
2279         amdgpu_bo_unref(&gws_bo);
2280         mutex_destroy(&(*mem)->lock);
2281         kfree(*mem);
2282         *mem = NULL;
2283         return ret;
2284 }
2285
2286 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
2287 {
2288         int ret;
2289         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
2290         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
2291         struct amdgpu_bo *gws_bo = kgd_mem->bo;
2292
2293         /* Remove BO from process's validate list so restore worker won't touch
2294          * it anymore
2295          */
2296         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
2297
2298         ret = amdgpu_bo_reserve(gws_bo, false);
2299         if (unlikely(ret)) {
2300                 pr_err("Reserve gws bo failed %d\n", ret);
2301                 //TODO add BO back to validate_list?
2302                 return ret;
2303         }
2304         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
2305                         process_info->eviction_fence);
2306         amdgpu_bo_unreserve(gws_bo);
2307         amdgpu_sync_free(&kgd_mem->sync);
2308         amdgpu_bo_unref(&gws_bo);
2309         mutex_destroy(&kgd_mem->lock);
2310         kfree(mem);
2311         return 0;
2312 }
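
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * attaching the device GWS BO to a process and detaching it again. The
 * "example_" name is hypothetical; gws is the amdgpu GWS BO handed over
 * by device setup code.
 */
static int example_use_gws(void *process_info, void *gws)
{
        struct kgd_mem *gws_mem;
        int ret;

        ret = amdgpu_amdkfd_add_gws_to_process(process_info, gws, &gws_mem);
        if (ret)
                return ret;

        /* ... set up queues that use GWS ... */

        return amdgpu_amdkfd_remove_gws_from_process(process_info, gws_mem);
}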
2313
2314 /* Returns GPU-specific tiling mode information */
2315 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
2316                                 struct tile_config *config)
2317 {
2318         struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
2319
2320         config->gb_addr_config = adev->gfx.config.gb_addr_config;
2321         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
2322         config->num_tile_configs =
2323                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2324         config->macro_tile_config_ptr =
2325                         adev->gfx.config.macrotile_mode_array;
2326         config->num_macro_tile_configs =
2327                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2328
2329         /* Those values are not set from GFX9 onwards */
2330         config->num_banks = adev->gfx.config.num_banks;
2331         config->num_ranks = adev->gfx.config.num_ranks;
2332
2333         return 0;
2334 }
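
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * reading the tile configuration. The struct only borrows pointers into
 * adev->gfx.config, so it stays valid as long as the device does. Field
 * types are assumed from the assignments above; the "example_" name is
 * hypothetical.
 */
static void example_query_tile_config(struct kgd_dev *kgd)
{
        struct tile_config cfg;
        unsigned int i;

        amdgpu_amdkfd_get_tile_config(kgd, &cfg);

        pr_debug("gb_addr_config 0x%x, %u tile modes\n",
                 cfg.gb_addr_config, cfg.num_tile_configs);

        for (i = 0; i < cfg.num_tile_configs; i++)
                pr_debug("tile mode %u: 0x%x\n", i, cfg.tile_config_ptr[i]);
}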