[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright 2014-2018 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #include <linux/dma-buf.h>
24 #include <linux/list.h>
25 #include <linux/pagemap.h>
26 #include <linux/sched/mm.h>
27 #include <linux/sched/task.h>
28 #include <linux/fdtable.h>
29 #include <drm/ttm/ttm_tt.h>
30
31 #include <drm/drm_exec.h>
32
33 #include "amdgpu_object.h"
34 #include "amdgpu_gem.h"
35 #include "amdgpu_vm.h"
36 #include "amdgpu_hmm.h"
37 #include "amdgpu_amdkfd.h"
38 #include "amdgpu_dma_buf.h"
39 #include <uapi/linux/kfd_ioctl.h>
40 #include "amdgpu_xgmi.h"
41 #include "kfd_priv.h"
42 #include "kfd_smi_events.h"
43
44 /* Userptr restore delay, just long enough to allow consecutive VM
45  * changes to accumulate
46  */
47 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
48 #define AMDGPU_RESERVE_MEM_LIMIT                        (3UL << 29)
49
50 /*
51  * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
52  * BO chunk
53  */
54 #define VRAM_AVAILABLITY_ALIGN (1 << 21)
55
56 /* Impose limit on how much memory KFD can use */
57 static struct {
58         uint64_t max_system_mem_limit;
59         uint64_t max_ttm_mem_limit;
60         int64_t system_mem_used;
61         int64_t ttm_mem_used;
62         spinlock_t mem_limit_lock;
63 } kfd_mem_limit;
64
65 static const char * const domain_bit_to_string[] = {
66                 "CPU",
67                 "GTT",
68                 "VRAM",
69                 "GDS",
70                 "GWS",
71                 "OA"
72 };
73
74 #define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
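/* Illustrative note (not part of the original source): domain_string() relies
 * on the AMDGPU_GEM_DOMAIN_* flags being single, ascending bits. As a rough
 * sketch, assuming the usual bit values from amdgpu_drm.h:
 *
 *   AMDGPU_GEM_DOMAIN_CPU  = 0x1  -> ffs() = 1 -> domain_bit_to_string[0] = "CPU"
 *   AMDGPU_GEM_DOMAIN_GTT  = 0x2  -> ffs() = 2 -> domain_bit_to_string[1] = "GTT"
 *   AMDGPU_GEM_DOMAIN_VRAM = 0x4  -> ffs() = 3 -> domain_bit_to_string[2] = "VRAM"
 *
 * so domain_string(AMDGPU_GEM_DOMAIN_VRAM) evaluates to "VRAM".
 */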
75
76 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
77
78 static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
79                 struct kgd_mem *mem)
80 {
81         struct kfd_mem_attachment *entry;
82
83         list_for_each_entry(entry, &mem->attachments, list)
84                 if (entry->bo_va->base.vm == avm)
85                         return true;
86
87         return false;
88 }
89
90 /**
91  * reuse_dmamap() - Check whether adev can share the original
92  * userptr BO
93  *
94  * If both adev and bo_adev are in direct mapping or
95  * in the same iommu group, they can share the original BO.
96  *
97  * @adev: Device that may or may not be able to share the original BO
98  * @bo_adev: Device to which the allocated BO belongs
99  *
100  * Return: returns true if adev can share original userptr BO,
101  * false otherwise.
102  */
103 static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
104 {
105         return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
106                         (adev->dev->iommu_group == bo_adev->dev->iommu_group);
107 }
108
109 /* Set memory usage limits. Currently, the limits are
110  *  System (TTM + userptr) memory - 15/16th System RAM
111  *  TTM memory - 3/8th System RAM
112  */
113 void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
114 {
115         struct sysinfo si;
116         uint64_t mem;
117
118         if (kfd_mem_limit.max_system_mem_limit)
119                 return;
120
121         si_meminfo(&si);
122         mem = si.totalram - si.totalhigh;
123         mem *= si.mem_unit;
124
125         spin_lock_init(&kfd_mem_limit.mem_limit_lock);
126         kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
127         if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
128                 kfd_mem_limit.max_system_mem_limit >>= 1;
129         else
130                 kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;
131
132         kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
133         pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
134                 (kfd_mem_limit.max_system_mem_limit >> 20),
135                 (kfd_mem_limit.max_ttm_mem_limit >> 20));
136 }
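/* Worked example (illustrative only, based on the code above): on a machine
 * with 16 GiB of system RAM,
 *
 *   mem - (mem >> 6)          = 15.75 GiB   (63/64 of RAM)
 *   AMDGPU_RESERVE_MEM_LIMIT  = 3UL << 29   = 1.5 GiB
 *
 * Since 15.75 GiB >= 2 * 1.5 GiB, the reserve is subtracted and
 * max_system_mem_limit ends up at 14.25 GiB. The TTM limit is whatever
 * ttm_tt_pages_limit() reports, converted from pages to bytes.
 */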
137
138 void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
139 {
140         kfd_mem_limit.system_mem_used += size;
141 }
142
143 /* Estimate page table size needed to represent a given memory size
144  *
145  * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
146  * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
147  * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
148  * for 2MB pages for TLB efficiency. However, small allocations and
149  * fragmented system memory still need some 4KB pages. We choose a
150  * compromise that should work in most cases without reserving too
151  * much memory for page tables unnecessarily (factor 16K, >> 14).
152  */
153
154 #define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
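/* Worked example (illustrative only): for 64 GiB of managed memory,
 *
 *   all 4 KiB pages:   64 GiB >> 9  = 128 MiB of PTEs
 *   all 2 MiB pages:   64 GiB >> 18 = 256 KiB of PTEs
 *   compromise (>>14): 64 GiB >> 14 =   4 MiB reserved for page tables
 *
 * clamped to at least AMDGPU_VM_RESERVED_VRAM by the max() in the macro.
 */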
155
156 /**
157  * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
158  * of buffer.
159  *
160  * @adev: Device to which allocated BO belongs to
161  * @size: Size of buffer, in bytes, encapsulated by BO. This should be
162  * equivalent to amdgpu_bo_size(BO)
163  * @alloc_flag: Flag used in allocating a BO as noted above
164  * @xcp_id: xcp_id is used to get the XCP from the XCP manager; one XCP is
165  * managed as one compute node in the driver for the application
166  *
167  * Return:
168  *      returns -ENOMEM in case of error, ZERO otherwise
169  */
170 int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
171                 uint64_t size, u32 alloc_flag, int8_t xcp_id)
172 {
173         uint64_t reserved_for_pt =
174                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
175         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
176         uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
177         size_t system_mem_needed, ttm_mem_needed, vram_needed;
178         int ret = 0;
179         uint64_t vram_size = 0;
180
181         system_mem_needed = 0;
182         ttm_mem_needed = 0;
183         vram_needed = 0;
184         if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
185                 system_mem_needed = size;
186                 ttm_mem_needed = size;
187         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
188                 /*
189                  * Conservatively round up the allocation requirement to 2 MB
190                  * to avoid fragmentation caused by 4K allocations in the tail
191                  * 2M BO chunk.
192                  */
193                 vram_needed = size;
194                 /*
195                  * For GFX 9.4.3, get the VRAM size from XCP structs
196                  */
197                 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
198                         return -EINVAL;
199
200                 vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
201                 if (adev->flags & AMD_IS_APU) {
202                         system_mem_needed = size;
203                         ttm_mem_needed = size;
204                 }
205         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
206                 system_mem_needed = size;
207         } else if (!(alloc_flag &
208                                 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
209                                  KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
210                 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
211                 return -ENOMEM;
212         }
213
214         spin_lock(&kfd_mem_limit.mem_limit_lock);
215
216         if (kfd_mem_limit.system_mem_used + system_mem_needed >
217             kfd_mem_limit.max_system_mem_limit)
218                 pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
219
220         if ((kfd_mem_limit.system_mem_used + system_mem_needed >
221              kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) ||
222             (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
223              kfd_mem_limit.max_ttm_mem_limit) ||
224             (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
225              vram_size - reserved_for_pt - reserved_for_ras - atomic64_read(&adev->vram_pin_size))) {
226                 ret = -ENOMEM;
227                 goto release;
228         }
229
230         /* Update memory accounting by decreasing available system
231          * memory, TTM memory and GPU memory as computed above
232          */
233         WARN_ONCE(vram_needed && !adev,
234                   "adev reference can't be null when vram is used");
235         if (adev && xcp_id >= 0) {
236                 adev->kfd.vram_used[xcp_id] += vram_needed;
237                 adev->kfd.vram_used_aligned[xcp_id] +=
238                                 (adev->flags & AMD_IS_APU) ?
239                                 vram_needed :
240                                 ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
241         }
242         kfd_mem_limit.system_mem_used += system_mem_needed;
243         kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
244
245 release:
246         spin_unlock(&kfd_mem_limit.mem_limit_lock);
247         return ret;
248 }
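/* Usage sketch (illustrative, not taken from this file): callers are expected
 * to pair reserve/unreserve around the lifetime of an allocation, e.g.:
 *
 *   ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_flags, xcp_id);
 *   if (ret)
 *           return ret;
 *
 *   ret = create_the_bo(...);    // hypothetical allocation step
 *   if (ret)
 *           // roll the accounting back on failure
 *           amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags, xcp_id);
 *
 * On normal release the accounting is unwound via amdgpu_amdkfd_release_notify().
 */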
249
250 void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
251                 uint64_t size, u32 alloc_flag, int8_t xcp_id)
252 {
253         spin_lock(&kfd_mem_limit.mem_limit_lock);
254
255         if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
256                 kfd_mem_limit.system_mem_used -= size;
257                 kfd_mem_limit.ttm_mem_used -= size;
258         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
259                 WARN_ONCE(!adev,
260                           "adev reference can't be null when alloc mem flags vram is set");
261                 if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
262                         goto release;
263
264                 if (adev) {
265                         adev->kfd.vram_used[xcp_id] -= size;
266                         if (adev->flags & AMD_IS_APU) {
267                                 adev->kfd.vram_used_aligned[xcp_id] -= size;
268                                 kfd_mem_limit.system_mem_used -= size;
269                                 kfd_mem_limit.ttm_mem_used -= size;
270                         } else {
271                                 adev->kfd.vram_used_aligned[xcp_id] -=
272                                         ALIGN(size, VRAM_AVAILABLITY_ALIGN);
273                         }
274                 }
275         } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
276                 kfd_mem_limit.system_mem_used -= size;
277         } else if (!(alloc_flag &
278                                 (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
279                                  KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
280                 pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
281                 goto release;
282         }
283         WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
284                   "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
285         WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
286                   "KFD TTM memory accounting unbalanced");
287         WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
288                   "KFD system memory accounting unbalanced");
289
290 release:
291         spin_unlock(&kfd_mem_limit.mem_limit_lock);
292 }
293
294 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
295 {
296         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
297         u32 alloc_flags = bo->kfd_bo->alloc_flags;
298         u64 size = amdgpu_bo_size(bo);
299
300         amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
301                                           bo->xcp_id);
302
303         kfree(bo->kfd_bo);
304 }
305
306 /**
307  * create_dmamap_sg_bo() - Creates an amdgpu_bo object to reflect information
308  * about USERPTR or DOORBELL or MMIO BO.
309  *
310  * @adev: Device for which dmamap BO is being created
311  * @mem: BO of peer device that is being DMA mapped. Provides parameters
312  *       in building the dmamap BO
313  * @bo_out: Output parameter updated with handle of dmamap BO
314  */
315 static int
316 create_dmamap_sg_bo(struct amdgpu_device *adev,
317                  struct kgd_mem *mem, struct amdgpu_bo **bo_out)
318 {
319         struct drm_gem_object *gem_obj;
320         int ret;
321         uint64_t flags = 0;
322
323         ret = amdgpu_bo_reserve(mem->bo, false);
324         if (ret)
325                 return ret;
326
327         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
328                 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
329                                         AMDGPU_GEM_CREATE_UNCACHED);
330
331         ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
332                         AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
333                         ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
334
335         amdgpu_bo_unreserve(mem->bo);
336
337         if (ret) {
338                 pr_err("Error in creating DMA mappable SG BO on domain: %d\n", ret);
339                 return -EINVAL;
340         }
341
342         *bo_out = gem_to_amdgpu_bo(gem_obj);
343         (*bo_out)->parent = amdgpu_bo_ref(mem->bo);
344         return ret;
345 }
346
347 /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
348  *  reservation object.
349  *
350  * @bo: [IN] Remove eviction fence(s) from this BO
351  * @ef: [IN] This eviction fence is removed if it
352  *  is present in the shared list.
353  *
354  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
355  */
356 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
357                                         struct amdgpu_amdkfd_fence *ef)
358 {
359         struct dma_fence *replacement;
360
361         if (!ef)
362                 return -EINVAL;
363
364         /* TODO: Instead of blocking before, we should use the fence of the page
365          * table update and TLB flush here directly.
366          */
367         replacement = dma_fence_get_stub();
368         dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
369                                 replacement, DMA_RESV_USAGE_BOOKKEEP);
370         dma_fence_put(replacement);
371         return 0;
372 }
373
374 int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
375 {
376         struct amdgpu_bo *root = bo;
377         struct amdgpu_vm_bo_base *vm_bo;
378         struct amdgpu_vm *vm;
379         struct amdkfd_process_info *info;
380         struct amdgpu_amdkfd_fence *ef;
381         int ret;
382
383         /* we can always get vm_bo from root PD bo. */
384         while (root->parent)
385                 root = root->parent;
386
387         vm_bo = root->vm_bo;
388         if (!vm_bo)
389                 return 0;
390
391         vm = vm_bo->vm;
392         if (!vm)
393                 return 0;
394
395         info = vm->process_info;
396         if (!info || !info->eviction_fence)
397                 return 0;
398
399         ef = container_of(dma_fence_get(&info->eviction_fence->base),
400                         struct amdgpu_amdkfd_fence, base);
401
402         BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
403         ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
404         dma_resv_unlock(bo->tbo.base.resv);
405
406         dma_fence_put(&ef->base);
407         return ret;
408 }
409
410 static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
411                                      bool wait)
412 {
413         struct ttm_operation_ctx ctx = { false, false };
414         int ret;
415
416         if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
417                  "Called with userptr BO"))
418                 return -EINVAL;
419
420         /* bo has been pinned, no need to validate it */
421         if (bo->tbo.pin_count)
422                 return 0;
423
424         amdgpu_bo_placement_from_domain(bo, domain);
425
426         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
427         if (ret)
428                 goto validate_fail;
429         if (wait)
430                 amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
431
432 validate_fail:
433         return ret;
434 }
435
436 int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
437                                         uint32_t domain,
438                                         struct dma_fence *fence)
439 {
440         int ret = amdgpu_bo_reserve(bo, false);
441
442         if (ret)
443                 return ret;
444
445         ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
446         if (ret)
447                 goto unreserve_out;
448
449         ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
450         if (ret)
451                 goto unreserve_out;
452
453         dma_resv_add_fence(bo->tbo.base.resv, fence,
454                            DMA_RESV_USAGE_BOOKKEEP);
455
456 unreserve_out:
457         amdgpu_bo_unreserve(bo);
458
459         return ret;
460 }
461
462 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
463 {
464         return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
465 }
466
467 /* vm_validate_pt_pd_bos - Validate page table and directory BOs
468  *
469  * Page directories are not updated here because huge page handling
470  * during page table updates can invalidate page directory entries
471  * again. Page directories are only updated after updating page
472  * tables.
473  */
474 static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
475                                  struct ww_acquire_ctx *ticket)
476 {
477         struct amdgpu_bo *pd = vm->root.bo;
478         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
479         int ret;
480
481         ret = amdgpu_vm_validate(adev, vm, ticket,
482                                  amdgpu_amdkfd_validate_vm_bo, NULL);
483         if (ret) {
484                 pr_err("failed to validate PT BOs\n");
485                 return ret;
486         }
487
488         vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
489
490         return 0;
491 }
492
493 static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
494 {
495         struct amdgpu_bo *pd = vm->root.bo;
496         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
497         int ret;
498
499         ret = amdgpu_vm_update_pdes(adev, vm, false);
500         if (ret)
501                 return ret;
502
503         return amdgpu_sync_fence(sync, vm->last_update);
504 }
505
506 static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
507 {
508         uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
509                                  AMDGPU_VM_MTYPE_DEFAULT;
510
511         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
512                 mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
513         if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
514                 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
515
516         return amdgpu_gem_va_map_flags(adev, mapping_flags);
517 }
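/* Illustrative example: for a BO allocated with
 * KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE,
 * mapping_flags becomes
 *
 *   AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 *   AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_DEFAULT
 *
 * before being translated into ASIC-specific PTE bits by
 * amdgpu_gem_va_map_flags().
 */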
518
519 /**
520  * create_sg_table() - Create an sg_table for a contiguous DMA addr range
521  * @addr: The starting address to point to
522  * @size: Size of memory area in bytes being pointed to
523  *
524  * Allocates an instance of sg_table and initializes it to point to memory
525  * area specified by input parameters. The address used to build is assumed
526  * to be DMA mapped, if needed.
527  *
528  * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
529  * because they are physically contiguous.
530  *
531  * Return: Initialized instance of SG Table or NULL
532  */
533 static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
534 {
535         struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
536
537         if (!sg)
538                 return NULL;
539         if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
540                 kfree(sg);
541                 return NULL;
542         }
543         sg_dma_address(sg->sgl) = addr;
544         sg->sgl->length = size;
545 #ifdef CONFIG_NEED_SG_DMA_LENGTH
546         sg->sgl->dma_length = size;
547 #endif
548         return sg;
549 }
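/* Usage sketch (illustrative): a single-node table for an already DMA-mapped
 * doorbell page could be built and torn down like this (dma_addr and the page
 * size are placeholders for the caller's values):
 *
 *   struct sg_table *sg = create_sg_table(dma_addr, PAGE_SIZE);
 *
 *   if (sg) {
 *           ttm->sg = sg;             // hand the table to TTM
 *           ...
 *           sg_free_table(sg);        // teardown once unmapped
 *           kfree(sg);
 *   }
 */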
550
551 static int
552 kfd_mem_dmamap_userptr(struct kgd_mem *mem,
553                        struct kfd_mem_attachment *attachment)
554 {
555         enum dma_data_direction direction =
556                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
557                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
558         struct ttm_operation_ctx ctx = {.interruptible = true};
559         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
560         struct amdgpu_device *adev = attachment->adev;
561         struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
562         struct ttm_tt *ttm = bo->tbo.ttm;
563         int ret;
564
565         if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
566                 return -EINVAL;
567
568         ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
569         if (unlikely(!ttm->sg))
570                 return -ENOMEM;
571
572         /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
573         ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
574                                         ttm->num_pages, 0,
575                                         (u64)ttm->num_pages << PAGE_SHIFT,
576                                         GFP_KERNEL);
577         if (unlikely(ret))
578                 goto free_sg;
579
580         ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
581         if (unlikely(ret))
582                 goto release_sg;
583
584         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
585         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
586         if (ret)
587                 goto unmap_sg;
588
589         return 0;
590
591 unmap_sg:
592         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
593 release_sg:
594         pr_err("DMA map userptr failed: %d\n", ret);
595         sg_free_table(ttm->sg);
596 free_sg:
597         kfree(ttm->sg);
598         ttm->sg = NULL;
599         return ret;
600 }
601
602 static int
603 kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
604 {
605         struct ttm_operation_ctx ctx = {.interruptible = true};
606         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
607         int ret;
608
609         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
610         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
611         if (ret)
612                 return ret;
613
614         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
615         return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
616 }
617
618 /**
619  * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
620  * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
621  * @attachment: Virtual address attachment of the BO on accessing device
622  *
623  * An access request from the device that owns DOORBELL does not require DMA mapping.
624  * This is because the request doesn't go through the PCIe root complex, i.e. it instead
625  * loops back. The need to DMA map arises only when accessing the peer device's DOORBELL.
626  *
627  * In contrast, all access requests for MMIO need to be DMA mapped without regard to
628  * device ownership. This is because access requests for MMIO go through PCIe root
629  * complex.
630  *
631  * This is accomplished in two steps:
632  *   - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
633  *         in updating requesting device's page table
634  *   - Signal TTM to mark memory pointed to by requesting device's BO as GPU
635  *         accessible. This allows an update of requesting device's page table
636  *         with entries associated with DOORBELL or MMIO memory
637  *
638  * This method is invoked in the following contexts:
639  *   - Mapping of DOORBELL or MMIO BO of same or peer device
640  *   - Validating an evicted DOORBELL or MMIO BO on the device seeking access
641  *
642  * Return: ZERO if successful, NON-ZERO otherwise
643  */
644 static int
645 kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
646                      struct kfd_mem_attachment *attachment)
647 {
648         struct ttm_operation_ctx ctx = {.interruptible = true};
649         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
650         struct amdgpu_device *adev = attachment->adev;
651         struct ttm_tt *ttm = bo->tbo.ttm;
652         enum dma_data_direction dir;
653         dma_addr_t dma_addr;
654         bool mmio;
655         int ret;
656
657         /* Expect SG Table of dmamap BO to be NULL */
658         mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
659         if (unlikely(ttm->sg)) {
660                 pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
661                 return -EINVAL;
662         }
663
664         dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
665                         DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
666         dma_addr = mem->bo->tbo.sg->sgl->dma_address;
667         pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
668         pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
669         dma_addr = dma_map_resource(adev->dev, dma_addr,
670                         mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
671         ret = dma_mapping_error(adev->dev, dma_addr);
672         if (unlikely(ret))
673                 return ret;
674         pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
675
676         ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
677         if (unlikely(!ttm->sg)) {
678                 ret = -ENOMEM;
679                 goto unmap_sg;
680         }
681
682         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
683         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
684         if (unlikely(ret))
685                 goto free_sg;
686
687         return ret;
688
689 free_sg:
690         sg_free_table(ttm->sg);
691         kfree(ttm->sg);
692         ttm->sg = NULL;
693 unmap_sg:
694         dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
695                            dir, DMA_ATTR_SKIP_CPU_SYNC);
696         return ret;
697 }
698
699 static int
700 kfd_mem_dmamap_attachment(struct kgd_mem *mem,
701                           struct kfd_mem_attachment *attachment)
702 {
703         switch (attachment->type) {
704         case KFD_MEM_ATT_SHARED:
705                 return 0;
706         case KFD_MEM_ATT_USERPTR:
707                 return kfd_mem_dmamap_userptr(mem, attachment);
708         case KFD_MEM_ATT_DMABUF:
709                 return kfd_mem_dmamap_dmabuf(attachment);
710         case KFD_MEM_ATT_SG:
711                 return kfd_mem_dmamap_sg_bo(mem, attachment);
712         default:
713                 WARN_ON_ONCE(1);
714         }
715         return -EINVAL;
716 }
717
718 static void
719 kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
720                          struct kfd_mem_attachment *attachment)
721 {
722         enum dma_data_direction direction =
723                 mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
724                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
725         struct ttm_operation_ctx ctx = {.interruptible = false};
726         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
727         struct amdgpu_device *adev = attachment->adev;
728         struct ttm_tt *ttm = bo->tbo.ttm;
729
730         if (unlikely(!ttm->sg))
731                 return;
732
733         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
734         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
735
736         dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
737         sg_free_table(ttm->sg);
738         kfree(ttm->sg);
739         ttm->sg = NULL;
740 }
741
742 static void
743 kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
744 {
745         /* This is a no-op. We don't want to trigger eviction fences when
746          * unmapping DMABufs. Therefore the invalidation (moving to system
747          * domain) is done in kfd_mem_dmamap_dmabuf.
748          */
749 }
750
751 /**
752  * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
753  * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
754  * @attachment: Virtual address attachment of the BO on accessing device
755  *
756  * The method performs following steps:
757  *   - Signal TTM to mark memory pointed to by BO as GPU inaccessible
758  *   - Free SG Table that is used to encapsulate DMA mapped memory of
759  *          peer device's DOORBELL or MMIO memory
760  *
761  * This method is invoked in the following contexts:
762  *     Unmapping of DOORBELL or MMIO BO on a device having access to its memory
763  *     Eviction of DOORBELL or MMIO BO on a device having access to its memory
764  *
765  * Return: void
766  */
767 static void
768 kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
769                        struct kfd_mem_attachment *attachment)
770 {
771         struct ttm_operation_ctx ctx = {.interruptible = true};
772         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
773         struct amdgpu_device *adev = attachment->adev;
774         struct ttm_tt *ttm = bo->tbo.ttm;
775         enum dma_data_direction dir;
776
777         if (unlikely(!ttm->sg)) {
778                 pr_debug("SG Table of BO is NULL");
779                 return;
780         }
781
782         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
783         ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
784
785         dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
786                                 DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
787         dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
788                         ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
789         sg_free_table(ttm->sg);
790         kfree(ttm->sg);
791         ttm->sg = NULL;
792         bo->tbo.sg = NULL;
793 }
794
795 static void
796 kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
797                             struct kfd_mem_attachment *attachment)
798 {
799         switch (attachment->type) {
800         case KFD_MEM_ATT_SHARED:
801                 break;
802         case KFD_MEM_ATT_USERPTR:
803                 kfd_mem_dmaunmap_userptr(mem, attachment);
804                 break;
805         case KFD_MEM_ATT_DMABUF:
806                 kfd_mem_dmaunmap_dmabuf(attachment);
807                 break;
808         case KFD_MEM_ATT_SG:
809                 kfd_mem_dmaunmap_sg_bo(mem, attachment);
810                 break;
811         default:
812                 WARN_ON_ONCE(1);
813         }
814 }
815
816 static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
817 {
818         if (!mem->dmabuf) {
819                 struct amdgpu_device *bo_adev;
820                 struct dma_buf *dmabuf;
821                 int r, fd;
822
823                 bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
824                 r = drm_gem_prime_handle_to_fd(&bo_adev->ddev, bo_adev->kfd.client.file,
825                                                mem->gem_handle,
826                         mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
827                                                DRM_RDWR : 0, &fd);
828                 if (r)
829                         return r;
830                 dmabuf = dma_buf_get(fd);
831                 close_fd(fd);
832                 if (WARN_ON_ONCE(IS_ERR(dmabuf)))
833                         return PTR_ERR(dmabuf);
834                 mem->dmabuf = dmabuf;
835         }
836
837         return 0;
838 }
839
840 static int
841 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
842                       struct amdgpu_bo **bo)
843 {
844         struct drm_gem_object *gobj;
845         int ret;
846
847         ret = kfd_mem_export_dmabuf(mem);
848         if (ret)
849                 return ret;
850
851         gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
852         if (IS_ERR(gobj))
853                 return PTR_ERR(gobj);
854
855         *bo = gem_to_amdgpu_bo(gobj);
856         (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
857
858         return 0;
859 }
860
861 /* kfd_mem_attach - Add a BO to a VM
862  *
863  * Everything that needs to be done only once when a BO is first added
864  * to a VM. It can later be mapped and unmapped many times without
865  * repeating these steps.
866  *
867  * 0. Create BO for DMA mapping, if needed
868  * 1. Allocate and initialize BO VA entry data structure
869  * 2. Add BO to the VM
870  * 3. Determine ASIC-specific PTE flags
871  * 4. Alloc page tables and directories if needed
872  * 4a.  Validate new page tables and directories
873  */
874 static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
875                 struct amdgpu_vm *vm, bool is_aql)
876 {
877         struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
878         unsigned long bo_size = mem->bo->tbo.base.size;
879         uint64_t va = mem->va;
880         struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
881         struct amdgpu_bo *bo[2] = {NULL, NULL};
882         struct amdgpu_bo_va *bo_va;
883         bool same_hive = false;
884         int i, ret;
885
886         if (!va) {
887                 pr_err("Invalid VA when adding BO to VM\n");
888                 return -EINVAL;
889         }
890
891         /* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
892          *
893  * The access path of MMIO and DOORBELL BOs is always over PCIe.
894  * In contrast, the access path of VRAM BOs depends upon the type of
895          * link that connects the peer device. Access over PCIe is allowed
896          * if peer device has large BAR. In contrast, access over xGMI is
897          * allowed for both small and large BAR configurations of peer device
898          */
899         if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
900             ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
901              (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
902              (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
903                 if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
904                         same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
905                 if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
906                         return -EINVAL;
907         }
908
909         for (i = 0; i <= is_aql; i++) {
910                 attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
911                 if (unlikely(!attachment[i])) {
912                         ret = -ENOMEM;
913                         goto unwind;
914                 }
915
916                 pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
917                          va + bo_size, vm);
918
919                 if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
920                     (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
921                     (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
922                     same_hive) {
923                         /* Mappings on the local GPU, or VRAM mappings in the
924                          * local hive, or userptr, or GTT mappings that can reuse
925                          * the DMA map address space, share the original BO
926                          */
927                         attachment[i]->type = KFD_MEM_ATT_SHARED;
928                         bo[i] = mem->bo;
929                         drm_gem_object_get(&bo[i]->tbo.base);
930                 } else if (i > 0) {
931                         /* Multiple mappings on the same GPU share the BO */
932                         attachment[i]->type = KFD_MEM_ATT_SHARED;
933                         bo[i] = bo[0];
934                         drm_gem_object_get(&bo[i]->tbo.base);
935                 } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
936                         /* Create an SG BO to DMA-map userptrs on other GPUs */
937                         attachment[i]->type = KFD_MEM_ATT_USERPTR;
938                         ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
939                         if (ret)
940                                 goto unwind;
941                 /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
942                 } else if (mem->bo->tbo.type == ttm_bo_type_sg) {
943                         WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
944                                     mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
945                                   "Handling invalid SG BO in ATTACH request");
946                         attachment[i]->type = KFD_MEM_ATT_SG;
947                         ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
948                         if (ret)
949                                 goto unwind;
950                 /* Enable access to GTT and VRAM BOs of peer devices */
951                 } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
952                            mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
953                         attachment[i]->type = KFD_MEM_ATT_DMABUF;
954                         ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
955                         if (ret)
956                                 goto unwind;
957                         pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
958                 } else {
959                         WARN_ONCE(true, "Handling invalid ATTACH request");
960                         ret = -EINVAL;
961                         goto unwind;
962                 }
963
964                 /* Add BO to VM internal data structures */
965                 ret = amdgpu_bo_reserve(bo[i], false);
966                 if (ret) {
967                         pr_debug("Unable to reserve BO during memory attach");
968                         goto unwind;
969                 }
970                 bo_va = amdgpu_vm_bo_find(vm, bo[i]);
971                 if (!bo_va)
972                         bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
973                 else
974                         ++bo_va->ref_count;
975                 attachment[i]->bo_va = bo_va;
976                 amdgpu_bo_unreserve(bo[i]);
977                 if (unlikely(!attachment[i]->bo_va)) {
978                         ret = -ENOMEM;
979                         pr_err("Failed to add BO object to VM. ret == %d\n",
980                                ret);
981                         goto unwind;
982                 }
983                 attachment[i]->va = va;
984                 attachment[i]->pte_flags = get_pte_flags(adev, mem);
985                 attachment[i]->adev = adev;
986                 list_add(&attachment[i]->list, &mem->attachments);
987
988                 va += bo_size;
989         }
990
991         return 0;
992
993 unwind:
994         for (; i >= 0; i--) {
995                 if (!attachment[i])
996                         continue;
997                 if (attachment[i]->bo_va) {
998                         amdgpu_bo_reserve(bo[i], true);
999                         if (--attachment[i]->bo_va->ref_count == 0)
1000                                 amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
1001                         amdgpu_bo_unreserve(bo[i]);
1002                         list_del(&attachment[i]->list);
1003                 }
1004                 if (bo[i])
1005                         drm_gem_object_put(&bo[i]->tbo.base);
1006                 kfree(attachment[i]);
1007         }
1008         return ret;
1009 }
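/* Summary of the attachment-type selection above (illustrative):
 *
 *   - local GPU (non-MMIO), same xGMI hive, or a userptr/GTT BO whose DMA
 *     mapping can be reused (reuse_dmamap())   -> KFD_MEM_ATT_SHARED
 *   - userptr BO accessed from a peer GPU      -> KFD_MEM_ATT_USERPTR
 *   - DOORBELL/MMIO SG BO                      -> KFD_MEM_ATT_SG
 *   - GTT/VRAM BO accessed from a peer GPU     -> KFD_MEM_ATT_DMABUF
 */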
1010
1011 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
1012 {
1013         struct amdgpu_bo *bo = attachment->bo_va->base.bo;
1014
1015         pr_debug("\t remove VA 0x%llx in entry %p\n",
1016                         attachment->va, attachment);
1017         if (--attachment->bo_va->ref_count == 0)
1018                 amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
1019         drm_gem_object_put(&bo->tbo.base);
1020         list_del(&attachment->list);
1021         kfree(attachment);
1022 }
1023
1024 static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
1025                                 struct amdkfd_process_info *process_info,
1026                                 bool userptr)
1027 {
1028         mutex_lock(&process_info->lock);
1029         if (userptr)
1030                 list_add_tail(&mem->validate_list,
1031                               &process_info->userptr_valid_list);
1032         else
1033                 list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
1034         mutex_unlock(&process_info->lock);
1035 }
1036
1037 static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
1038                 struct amdkfd_process_info *process_info)
1039 {
1040         mutex_lock(&process_info->lock);
1041         list_del(&mem->validate_list);
1042         mutex_unlock(&process_info->lock);
1043 }
1044
1045 /* Initializes user pages. It registers the MMU notifier and validates
1046  * the userptr BO in the GTT domain.
1047  *
1048  * The BO must already be on the userptr_valid_list. Otherwise an
1049  * eviction and restore may happen that leaves the new BO unmapped
1050  * with the user mode queues running.
1051  *
1052  * Takes the process_info->lock to protect against concurrent restore
1053  * workers.
1054  *
1055  * Returns 0 for success, negative errno for errors.
1056  */
1057 static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
1058                            bool criu_resume)
1059 {
1060         struct amdkfd_process_info *process_info = mem->process_info;
1061         struct amdgpu_bo *bo = mem->bo;
1062         struct ttm_operation_ctx ctx = { true, false };
1063         struct hmm_range *range;
1064         int ret = 0;
1065
1066         mutex_lock(&process_info->lock);
1067
1068         ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
1069         if (ret) {
1070                 pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
1071                 goto out;
1072         }
1073
1074         ret = amdgpu_hmm_register(bo, user_addr);
1075         if (ret) {
1076                 pr_err("%s: Failed to register MMU notifier: %d\n",
1077                        __func__, ret);
1078                 goto out;
1079         }
1080
1081         if (criu_resume) {
1082                 /*
1083                  * During a CRIU restore operation, the userptr buffer objects
1084                  * will be validated in the restore_userptr_work worker at a
1085                  * later stage when it is scheduled by another ioctl called by
1086                  * CRIU master process for the target pid for restore.
1087                  */
1088                 mutex_lock(&process_info->notifier_lock);
1089                 mem->invalid++;
1090                 mutex_unlock(&process_info->notifier_lock);
1091                 mutex_unlock(&process_info->lock);
1092                 return 0;
1093         }
1094
1095         ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
1096         if (ret) {
1097                 if (ret == -EAGAIN)
1098                         pr_debug("Failed to get user pages, try again\n");
1099                 else
1100                         pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
1101                 goto unregister_out;
1102         }
1103
1104         ret = amdgpu_bo_reserve(bo, true);
1105         if (ret) {
1106                 pr_err("%s: Failed to reserve BO\n", __func__);
1107                 goto release_out;
1108         }
1109         amdgpu_bo_placement_from_domain(bo, mem->domain);
1110         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1111         if (ret)
1112                 pr_err("%s: failed to validate BO\n", __func__);
1113         amdgpu_bo_unreserve(bo);
1114
1115 release_out:
1116         amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
1117 unregister_out:
1118         if (ret)
1119                 amdgpu_hmm_unregister(bo);
1120 out:
1121         mutex_unlock(&process_info->lock);
1122         return ret;
1123 }
1124
1125 /* Reserving a BO and its page table BOs must happen atomically to
1126  * avoid deadlocks. Some operations update multiple VMs at once. Track
1127  * all the reservation info in a context structure. Optionally a sync
1128  * object can track VM updates.
1129  */
1130 struct bo_vm_reservation_context {
1131         /* DRM execution context for the reservation */
1132         struct drm_exec exec;
1133         /* Number of VMs reserved */
1134         unsigned int n_vms;
1135         /* Pointer to sync object */
1136         struct amdgpu_sync *sync;
1137 };
1138
1139 enum bo_vm_match {
1140         BO_VM_NOT_MAPPED = 0,   /* Match VMs where a BO is not mapped */
1141         BO_VM_MAPPED,           /* Match VMs where a BO is mapped     */
1142         BO_VM_ALL,              /* Match all VMs a BO was added to    */
1143 };
1144
1145 /**
1146  * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
1147  * @mem: KFD BO structure.
1148  * @vm: the VM to reserve.
1149  * @ctx: the struct that will be used in unreserve_bo_and_vms().
1150  */
1151 static int reserve_bo_and_vm(struct kgd_mem *mem,
1152                               struct amdgpu_vm *vm,
1153                               struct bo_vm_reservation_context *ctx)
1154 {
1155         struct amdgpu_bo *bo = mem->bo;
1156         int ret;
1157
1158         WARN_ON(!vm);
1159
1160         ctx->n_vms = 1;
1161         ctx->sync = &mem->sync;
1162         drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
1163         drm_exec_until_all_locked(&ctx->exec) {
1164                 ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
1165                 drm_exec_retry_on_contention(&ctx->exec);
1166                 if (unlikely(ret))
1167                         goto error;
1168
1169                 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1170                 drm_exec_retry_on_contention(&ctx->exec);
1171                 if (unlikely(ret))
1172                         goto error;
1173         }
1174         return 0;
1175
1176 error:
1177         pr_err("Failed to reserve buffers in ttm.\n");
1178         drm_exec_fini(&ctx->exec);
1179         return ret;
1180 }
1181
1182 /**
1183  * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
1184  * @mem: KFD BO structure.
1185  * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
1186  * are used. Otherwise, a single VM associated with the BO.
1187  * @map_type: the mapping status that will be used to filter the VMs.
1188  * @ctx: the struct that will be used in unreserve_bo_and_vms().
1189  *
1190  * Returns 0 for success, negative for failure.
1191  */
1192 static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
1193                                 struct amdgpu_vm *vm, enum bo_vm_match map_type,
1194                                 struct bo_vm_reservation_context *ctx)
1195 {
1196         struct kfd_mem_attachment *entry;
1197         struct amdgpu_bo *bo = mem->bo;
1198         int ret;
1199
1200         ctx->sync = &mem->sync;
1201         drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
1202                       DRM_EXEC_IGNORE_DUPLICATES, 0);
1203         drm_exec_until_all_locked(&ctx->exec) {
1204                 ctx->n_vms = 0;
1205                 list_for_each_entry(entry, &mem->attachments, list) {
1206                         if ((vm && vm != entry->bo_va->base.vm) ||
1207                                 (entry->is_mapped != map_type
1208                                 && map_type != BO_VM_ALL))
1209                                 continue;
1210
1211                         ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
1212                                                 &ctx->exec, 2);
1213                         drm_exec_retry_on_contention(&ctx->exec);
1214                         if (unlikely(ret))
1215                                 goto error;
1216                         ++ctx->n_vms;
1217                 }
1218
1219                 ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
1220                 drm_exec_retry_on_contention(&ctx->exec);
1221                 if (unlikely(ret))
1222                         goto error;
1223         }
1224         return 0;
1225
1226 error:
1227         pr_err("Failed to reserve buffers in ttm.\n");
1228         drm_exec_fini(&ctx->exec);
1229         return ret;
1230 }
1231
1232 /**
1233  * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
1234  * @ctx: Reservation context to unreserve
1235  * @wait: Optionally wait for a sync object representing pending VM updates
1236  * @intr: Whether the wait is interruptible
1237  *
1238  * Also frees any resources allocated in
1239  * reserve_bo_and_(cond_)vm(s). Returns the status from
1240  * amdgpu_sync_wait.
1241  */
1242 static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
1243                                  bool wait, bool intr)
1244 {
1245         int ret = 0;
1246
1247         if (wait)
1248                 ret = amdgpu_sync_wait(ctx->sync, intr);
1249
1250         drm_exec_fini(&ctx->exec);
1251         ctx->sync = NULL;
1252         return ret;
1253 }
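/* Usage sketch (illustrative): the reservation helpers above are meant to be
 * used in a bracketed fashion, for example:
 *
 *   struct bo_vm_reservation_context ctx;
 *   int ret;
 *
 *   ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_MAPPED, &ctx);
 *   if (ret)
 *           return ret;
 *
 *   ... update mappings, adding fences to ctx.sync ...
 *
 *   ret = unreserve_bo_and_vms(&ctx, true, false);
 *
 * The final call optionally waits on the accumulated sync object and always
 * releases the drm_exec locks taken by the reserve step.
 */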
1254
1255 static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
1256                                 struct kfd_mem_attachment *entry,
1257                                 struct amdgpu_sync *sync)
1258 {
1259         struct amdgpu_bo_va *bo_va = entry->bo_va;
1260         struct amdgpu_device *adev = entry->adev;
1261         struct amdgpu_vm *vm = bo_va->base.vm;
1262
1263         amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
1264
1265         amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
1266
1267         amdgpu_sync_fence(sync, bo_va->last_pt_update);
1268 }
1269
1270 static int update_gpuvm_pte(struct kgd_mem *mem,
1271                             struct kfd_mem_attachment *entry,
1272                             struct amdgpu_sync *sync)
1273 {
1274         struct amdgpu_bo_va *bo_va = entry->bo_va;
1275         struct amdgpu_device *adev = entry->adev;
1276         int ret;
1277
1278         ret = kfd_mem_dmamap_attachment(mem, entry);
1279         if (ret)
1280                 return ret;
1281
1282         /* Update the page tables  */
1283         ret = amdgpu_vm_bo_update(adev, bo_va, false);
1284         if (ret) {
1285                 pr_err("amdgpu_vm_bo_update failed\n");
1286                 return ret;
1287         }
1288
1289         return amdgpu_sync_fence(sync, bo_va->last_pt_update);
1290 }
1291
1292 static int map_bo_to_gpuvm(struct kgd_mem *mem,
1293                            struct kfd_mem_attachment *entry,
1294                            struct amdgpu_sync *sync,
1295                            bool no_update_pte)
1296 {
1297         int ret;
1298
1299         /* Set virtual address for the allocation */
1300         ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
1301                                amdgpu_bo_size(entry->bo_va->base.bo),
1302                                entry->pte_flags);
1303         if (ret) {
1304                 pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
1305                                 entry->va, ret);
1306                 return ret;
1307         }
1308
1309         if (no_update_pte)
1310                 return 0;
1311
1312         ret = update_gpuvm_pte(mem, entry, sync);
1313         if (ret) {
1314                 pr_err("update_gpuvm_pte() failed\n");
1315                 goto update_gpuvm_pte_failed;
1316         }
1317
1318         return 0;
1319
1320 update_gpuvm_pte_failed:
1321         unmap_bo_from_gpuvm(mem, entry, sync);
1322         kfd_mem_dmaunmap_attachment(mem, entry);
1323         return ret;
1324 }
1325
1326 static int process_validate_vms(struct amdkfd_process_info *process_info,
1327                                 struct ww_acquire_ctx *ticket)
1328 {
1329         struct amdgpu_vm *peer_vm;
1330         int ret;
1331
1332         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1333                             vm_list_node) {
1334                 ret = vm_validate_pt_pd_bos(peer_vm, ticket);
1335                 if (ret)
1336                         return ret;
1337         }
1338
1339         return 0;
1340 }
1341
1342 static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
1343                                  struct amdgpu_sync *sync)
1344 {
1345         struct amdgpu_vm *peer_vm;
1346         int ret;
1347
1348         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1349                             vm_list_node) {
1350                 struct amdgpu_bo *pd = peer_vm->root.bo;
1351
1352                 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1353                                        AMDGPU_SYNC_NE_OWNER,
1354                                        AMDGPU_FENCE_OWNER_KFD);
1355                 if (ret)
1356                         return ret;
1357         }
1358
1359         return 0;
1360 }
1361
1362 static int process_update_pds(struct amdkfd_process_info *process_info,
1363                               struct amdgpu_sync *sync)
1364 {
1365         struct amdgpu_vm *peer_vm;
1366         int ret;
1367
1368         list_for_each_entry(peer_vm, &process_info->vm_list_head,
1369                             vm_list_node) {
1370                 ret = vm_update_pds(peer_vm, sync);
1371                 if (ret)
1372                         return ret;
1373         }
1374
1375         return 0;
1376 }
1377
1378 static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
1379                        struct dma_fence **ef)
1380 {
1381         struct amdkfd_process_info *info = NULL;
1382         int ret;
1383
1384         if (!*process_info) {
1385                 info = kzalloc(sizeof(*info), GFP_KERNEL);
1386                 if (!info)
1387                         return -ENOMEM;
1388
1389                 mutex_init(&info->lock);
1390                 mutex_init(&info->notifier_lock);
1391                 INIT_LIST_HEAD(&info->vm_list_head);
1392                 INIT_LIST_HEAD(&info->kfd_bo_list);
1393                 INIT_LIST_HEAD(&info->userptr_valid_list);
1394                 INIT_LIST_HEAD(&info->userptr_inval_list);
1395
1396                 info->eviction_fence =
1397                         amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
1398                                                    current->mm,
1399                                                    NULL);
1400                 if (!info->eviction_fence) {
1401                         pr_err("Failed to create eviction fence\n");
1402                         ret = -ENOMEM;
1403                         goto create_evict_fence_fail;
1404                 }
1405
1406                 info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
1407                 INIT_DELAYED_WORK(&info->restore_userptr_work,
1408                                   amdgpu_amdkfd_restore_userptr_worker);
1409
1410                 *process_info = info;
1411         }
1412
1413         vm->process_info = *process_info;
1414
1415         /* Validate page directory and attach eviction fence */
1416         ret = amdgpu_bo_reserve(vm->root.bo, true);
1417         if (ret)
1418                 goto reserve_pd_fail;
1419         ret = vm_validate_pt_pd_bos(vm, NULL);
1420         if (ret) {
1421                 pr_err("validate_pt_pd_bos() failed\n");
1422                 goto validate_pd_fail;
1423         }
1424         ret = amdgpu_bo_sync_wait(vm->root.bo,
1425                                   AMDGPU_FENCE_OWNER_KFD, false);
1426         if (ret)
1427                 goto wait_pd_fail;
1428         ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1429         if (ret)
1430                 goto reserve_shared_fail;
1431         dma_resv_add_fence(vm->root.bo->tbo.base.resv,
1432                            &vm->process_info->eviction_fence->base,
1433                            DMA_RESV_USAGE_BOOKKEEP);
1434         amdgpu_bo_unreserve(vm->root.bo);
1435
1436         /* Update process info */
1437         mutex_lock(&vm->process_info->lock);
1438         list_add_tail(&vm->vm_list_node,
1439                         &(vm->process_info->vm_list_head));
1440         vm->process_info->n_vms++;
1441
1442         *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
1443         mutex_unlock(&vm->process_info->lock);
1444
1445         return 0;
1446
1447 reserve_shared_fail:
1448 wait_pd_fail:
1449 validate_pd_fail:
1450         amdgpu_bo_unreserve(vm->root.bo);
1451 reserve_pd_fail:
1452         vm->process_info = NULL;
1453         if (info) {
1454                 dma_fence_put(&info->eviction_fence->base);
1455                 *process_info = NULL;
1456                 put_pid(info->pid);
1457 create_evict_fence_fail:
1458                 mutex_destroy(&info->lock);
1459                 mutex_destroy(&info->notifier_lock);
1460                 kfree(info);
1461         }
1462         return ret;
1463 }
1464
1465 /**
1466  * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
1467  * @bo: Handle of buffer object being pinned
1468  * @domain: Domain into which BO should be pinned
1469  *
1470  *   - USERPTR BOs are UNPINNABLE and will return an error
1471  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1472  *     PIN count incremented. It is valid to PIN a BO multiple times
1473  *
1474  * Return: ZERO if successful in pinning, Non-Zero in case of error.
1475  */
1476 static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
1477 {
1478         int ret = 0;
1479
1480         ret = amdgpu_bo_reserve(bo, false);
1481         if (unlikely(ret))
1482                 return ret;
1483
1484         if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
1485                 /*
1486                  * If bo is not contiguous on VRAM, move to system memory first to ensure
1487                  * we can get contiguous VRAM space after evicting other BOs.
1488                  */
1489                 if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
1490                         struct ttm_operation_ctx ctx = { true, false };
1491
1492                         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
1493                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1494                         if (unlikely(ret)) {
1495                                 pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
1496                                 goto out;
1497                         }
1498                 }
1499         }
1500
1501         ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0);
1502         if (ret)
1503                 pr_err("Error in Pinning BO to domain: %d\n", domain);
1504
1505         amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
1506 out:
1507         amdgpu_bo_unreserve(bo);
1508         return ret;
1509 }
1510
1511 /**
1512  * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
1513  * @bo: Handle of buffer object being unpinned
1514  *
1515  *   - Is an illegal request for USERPTR BOs and is ignored
1516  *   - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
1517  *     PIN count decremented. Calls to UNPIN must balance calls to PIN
1518  */
1519 static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
1520 {
1521         int ret = 0;
1522
1523         ret = amdgpu_bo_reserve(bo, false);
1524         if (unlikely(ret))
1525                 return;
1526
1527         amdgpu_bo_unpin(bo);
1528         amdgpu_bo_unreserve(bo);
1529 }
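
/*
 * Illustrative sketch (not part of the driver): this is roughly how the
 * allocation and free paths below use the pin/unpin helpers for
 * DOORBELL/MMIO_REMAP BOs. Error handling is omitted.
 *
 *	ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	if (!ret) {
 *		bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 *		bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 *	}
 *	...
 *	amdgpu_amdkfd_gpuvm_unpin_bo(bo);
 *
 * The unpin on the free path balances the pin taken at allocation time.
 */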
1530
1531 int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
1532                                      struct amdgpu_vm *avm, u32 pasid)
1533
1534 {
1535         int ret;
1536
1537         /* Free the pasid originally allocated by amdgpu; it will be
1538          * replaced with the pasid allocated by KFD.
1539          */
1540         if (avm->pasid) {
1541                 amdgpu_pasid_free(avm->pasid);
1542                 amdgpu_vm_set_pasid(adev, avm, 0);
1543         }
1544
1545         ret = amdgpu_vm_set_pasid(adev, avm, pasid);
1546         if (ret)
1547                 return ret;
1548
1549         return 0;
1550 }
1551
1552 int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
1553                                            struct amdgpu_vm *avm,
1554                                            void **process_info,
1555                                            struct dma_fence **ef)
1556 {
1557         int ret;
1558
1559         /* Already a compute VM? */
1560         if (avm->process_info)
1561                 return -EINVAL;
1562
1563         /* Convert VM into a compute VM */
1564         ret = amdgpu_vm_make_compute(adev, avm);
1565         if (ret)
1566                 return ret;
1567
1568         /* Initialize KFD part of the VM and process info */
1569         ret = init_kfd_vm(avm, process_info, ef);
1570         if (ret)
1571                 return ret;
1572
1573         amdgpu_vm_set_task_info(avm);
1574
1575         return 0;
1576 }
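
/*
 * Rough usage sketch (illustrative only, simplified from the KFD process
 * setup code; variable names are placeholders): the render-node VM of the
 * process is turned into a compute VM here and released again when the
 * process goes away.
 *
 *	ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(adev, avm,
 *						     &process_info, &ef);
 *	...
 *	amdgpu_amdkfd_gpuvm_release_process_vm(adev, drm_priv);
 */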
1577
1578 void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
1579                                     struct amdgpu_vm *vm)
1580 {
1581         struct amdkfd_process_info *process_info = vm->process_info;
1582
1583         if (!process_info)
1584                 return;
1585
1586         /* Update process info */
1587         mutex_lock(&process_info->lock);
1588         process_info->n_vms--;
1589         list_del(&vm->vm_list_node);
1590         mutex_unlock(&process_info->lock);
1591
1592         vm->process_info = NULL;
1593
1594         /* Release per-process resources when last compute VM is destroyed */
1595         if (!process_info->n_vms) {
1596                 WARN_ON(!list_empty(&process_info->kfd_bo_list));
1597                 WARN_ON(!list_empty(&process_info->userptr_valid_list));
1598                 WARN_ON(!list_empty(&process_info->userptr_inval_list));
1599
1600                 dma_fence_put(&process_info->eviction_fence->base);
1601                 cancel_delayed_work_sync(&process_info->restore_userptr_work);
1602                 put_pid(process_info->pid);
1603                 mutex_destroy(&process_info->lock);
1604                 mutex_destroy(&process_info->notifier_lock);
1605                 kfree(process_info);
1606         }
1607 }
1608
1609 void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
1610                                             void *drm_priv)
1611 {
1612         struct amdgpu_vm *avm;
1613
1614         if (WARN_ON(!adev || !drm_priv))
1615                 return;
1616
1617         avm = drm_priv_to_vm(drm_priv);
1618
1619         pr_debug("Releasing process vm %p\n", avm);
1620
1621         /* The original pasid of the amdgpu vm has already been
1622          * released when the amdgpu vm was converted to a compute vm.
1623          * The current pasid is managed by KFD and will be released
1624          * on KFD process destroy. Set the amdgpu pasid to 0 to avoid
1625          * a duplicate release.
1626          */
1627         amdgpu_vm_release_compute(adev, avm);
1628 }
1629
1630 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
1631 {
1632         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1633         struct amdgpu_bo *pd = avm->root.bo;
1634         struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
1635
1636         if (adev->asic_type < CHIP_VEGA10)
1637                 return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
1638         return avm->pd_phys_addr;
1639 }
1640
1641 void amdgpu_amdkfd_block_mmu_notifications(void *p)
1642 {
1643         struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1644
1645         mutex_lock(&pinfo->lock);
1646         WRITE_ONCE(pinfo->block_mmu_notifications, true);
1647         mutex_unlock(&pinfo->lock);
1648 }
1649
1650 int amdgpu_amdkfd_criu_resume(void *p)
1651 {
1652         int ret = 0;
1653         struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
1654
1655         mutex_lock(&pinfo->lock);
1656         pr_debug("scheduling work\n");
1657         mutex_lock(&pinfo->notifier_lock);
1658         pinfo->evicted_bos++;
1659         mutex_unlock(&pinfo->notifier_lock);
1660         if (!READ_ONCE(pinfo->block_mmu_notifications)) {
1661                 ret = -EINVAL;
1662                 goto out_unlock;
1663         }
1664         WRITE_ONCE(pinfo->block_mmu_notifications, false);
1665         queue_delayed_work(system_freezable_wq,
1666                            &pinfo->restore_userptr_work, 0);
1667
1668 out_unlock:
1669         mutex_unlock(&pinfo->lock);
1670         return ret;
1671 }
1672
1673 size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
1674                                           uint8_t xcp_id)
1675 {
1676         uint64_t reserved_for_pt =
1677                 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
1678         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1679         uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
1680         ssize_t available;
1681         uint64_t vram_available, system_mem_available, ttm_mem_available;
1682
1683         spin_lock(&kfd_mem_limit.mem_limit_lock);
1684         vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
1685                 - adev->kfd.vram_used_aligned[xcp_id]
1686                 - atomic64_read(&adev->vram_pin_size)
1687                 - reserved_for_pt
1688                 - reserved_for_ras;
1689
1690         if (adev->flags & AMD_IS_APU) {
1691                 system_mem_available = no_system_mem_limit ?
1692                                         kfd_mem_limit.max_system_mem_limit :
1693                                         kfd_mem_limit.max_system_mem_limit -
1694                                         kfd_mem_limit.system_mem_used;
1695
1696                 ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
1697                                 kfd_mem_limit.ttm_mem_used;
1698
1699                 available = min3(system_mem_available, ttm_mem_available,
1700                                  vram_available);
1701                 available = ALIGN_DOWN(available, PAGE_SIZE);
1702         } else {
1703                 available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
1704         }
1705
1706         spin_unlock(&kfd_mem_limit.mem_limit_lock);
1707
1708         if (available < 0)
1709                 available = 0;
1710
1711         return available;
1712 }
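
/*
 * Worked example (illustrative, made-up numbers): on a dGPU partition with
 * 16 GiB of VRAM, 1 GiB already used by KFD, 0.5 GiB pinned and 1 GiB
 * reserved for page tables and RAS, the function reports
 * 16 - 1 - 0.5 - 1 = 13.5 GiB, rounded down to the 2 MiB
 * VRAM_AVAILABLITY_ALIGN boundary. On APUs the result is additionally
 * clamped by the system and TTM memory limits and is page-aligned instead.
 */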
1713
1714 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
1715                 struct amdgpu_device *adev, uint64_t va, uint64_t size,
1716                 void *drm_priv, struct kgd_mem **mem,
1717                 uint64_t *offset, uint32_t flags, bool criu_resume)
1718 {
1719         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
1720         struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
1721         enum ttm_bo_type bo_type = ttm_bo_type_device;
1722         struct sg_table *sg = NULL;
1723         uint64_t user_addr = 0;
1724         struct amdgpu_bo *bo;
1725         struct drm_gem_object *gobj = NULL;
1726         u32 domain, alloc_domain;
1727         uint64_t aligned_size;
1728         int8_t xcp_id = -1;
1729         u64 alloc_flags;
1730         int ret;
1731
1732         /*
1733          * Check on which domain to allocate BO
1734          */
1735         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
1736                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
1737
1738                 if (adev->flags & AMD_IS_APU) {
1739                         domain = AMDGPU_GEM_DOMAIN_GTT;
1740                         alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1741                         alloc_flags = 0;
1742                 } else {
1743                         alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
1744                         alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
1745                         AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
1746
1747                         /* For contiguous VRAM allocation */
1748                         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
1749                                 alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1750                 }
1751                 xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
1752                                         0 : fpriv->xcp_id;
1753         } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
1754                 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
1755                 alloc_flags = 0;
1756         } else {
1757                 domain = AMDGPU_GEM_DOMAIN_GTT;
1758                 alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
1759                 alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
1760
1761                 if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
1762                         if (!offset || !*offset)
1763                                 return -EINVAL;
1764                         user_addr = untagged_addr(*offset);
1765                 } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1766                                     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1767                         bo_type = ttm_bo_type_sg;
1768                         if (size > UINT_MAX)
1769                                 return -EINVAL;
1770                         sg = create_sg_table(*offset, size);
1771                         if (!sg)
1772                                 return -ENOMEM;
1773                 } else {
1774                         return -EINVAL;
1775                 }
1776         }
1777
1778         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
1779                 alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
1780         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
1781                 alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
1782         if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
1783                 alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
1784
1785         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
1786         if (!*mem) {
1787                 ret = -ENOMEM;
1788                 goto err;
1789         }
1790         INIT_LIST_HEAD(&(*mem)->attachments);
1791         mutex_init(&(*mem)->lock);
1792         (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
1793
1794         /* Workaround for AQL queue wraparound bug. Map the same
1795          * memory twice. That means we only actually allocate half
1796          * the memory.
1797          */
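	/* Example: an 8 MiB AQL queue request results in a 4 MiB BO that is
	 * later mapped twice, at va and at va + 4 MiB (see the aql_queue
	 * handling in kfd_mem_attach()).
	 */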
1798         if ((*mem)->aql_queue)
1799                 size >>= 1;
1800         aligned_size = PAGE_ALIGN(size);
1801
1802         (*mem)->alloc_flags = flags;
1803
1804         amdgpu_sync_create(&(*mem)->sync);
1805
1806         ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
1807                                               xcp_id);
1808         if (ret) {
1809                 pr_debug("Insufficient memory\n");
1810                 goto err_reserve_limit;
1811         }
1812
1813         pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
1814                  va, (*mem)->aql_queue ? size << 1 : size,
1815                  domain_string(alloc_domain), xcp_id);
1816
1817         ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
1818                                        bo_type, NULL, &gobj, xcp_id + 1);
1819         if (ret) {
1820                 pr_debug("Failed to create BO on domain %s. ret %d\n",
1821                          domain_string(alloc_domain), ret);
1822                 goto err_bo_create;
1823         }
1824         ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
1825         if (ret) {
1826                 pr_debug("Failed to allow vma node access. ret %d\n", ret);
1827                 goto err_node_allow;
1828         }
1829         ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
1830         if (ret)
1831                 goto err_gem_handle_create;
1832         bo = gem_to_amdgpu_bo(gobj);
1833         if (bo_type == ttm_bo_type_sg) {
1834                 bo->tbo.sg = sg;
1835                 bo->tbo.ttm->sg = sg;
1836         }
1837         bo->kfd_bo = *mem;
1838         (*mem)->bo = bo;
1839         if (user_addr)
1840                 bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
1841
1842         (*mem)->va = va;
1843         (*mem)->domain = domain;
1844         (*mem)->mapped_to_gpu_memory = 0;
1845         (*mem)->process_info = avm->process_info;
1846
1847         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
1848
1849         if (user_addr) {
1850                 pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
1851                 ret = init_user_pages(*mem, user_addr, criu_resume);
1852                 if (ret)
1853                         goto allocate_init_user_pages_failed;
1854         } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1855                                 KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1856                 ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
1857                 if (ret) {
1858                         pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
1859                         goto err_pin_bo;
1860                 }
1861                 bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
1862                 bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
1863         } else {
1864                 mutex_lock(&avm->process_info->lock);
1865                 if (avm->process_info->eviction_fence &&
1866                     !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
1867                         ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
1868                                 &avm->process_info->eviction_fence->base);
1869                 mutex_unlock(&avm->process_info->lock);
1870                 if (ret)
1871                         goto err_validate_bo;
1872         }
1873
1874         if (offset)
1875                 *offset = amdgpu_bo_mmap_offset(bo);
1876
1877         return 0;
1878
1879 allocate_init_user_pages_failed:
1880 err_pin_bo:
1881 err_validate_bo:
1882         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
1883         drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
1884 err_gem_handle_create:
1885         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
1886 err_node_allow:
1887         /* Don't unreserve system mem limit twice */
1888         goto err_reserve_limit;
1889 err_bo_create:
1890         amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
1891 err_reserve_limit:
1892         amdgpu_sync_free(&(*mem)->sync);
1893         mutex_destroy(&(*mem)->lock);
1894         if (gobj)
1895                 drm_gem_object_put(gobj);
1896         else
1897                 kfree(*mem);
1898 err:
1899         if (sg) {
1900                 sg_free_table(sg);
1901                 kfree(sg);
1902         }
1903         return ret;
1904 }
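
/*
 * Illustrative sketch (not part of the driver; variable names are
 * placeholders): for a userptr allocation the caller passes the CPU virtual
 * address of the user buffer in via @offset; for regular VRAM/GTT
 * allocations @offset instead returns the mmap offset of the new BO.
 *
 *	uint64_t offset = user_va;
 *	uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_USERPTR |
 *			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *
 *	ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, gpu_va, size,
 *						      drm_priv, &mem, &offset,
 *						      flags, false);
 */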
1905
1906 int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
1907                 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
1908                 uint64_t *size)
1909 {
1910         struct amdkfd_process_info *process_info = mem->process_info;
1911         unsigned long bo_size = mem->bo->tbo.base.size;
1912         bool use_release_notifier = (mem->bo->kfd_bo == mem);
1913         struct kfd_mem_attachment *entry, *tmp;
1914         struct bo_vm_reservation_context ctx;
1915         unsigned int mapped_to_gpu_memory;
1916         int ret;
1917         bool is_imported = false;
1918
1919         mutex_lock(&mem->lock);
1920
1921         /* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
1922         if (mem->alloc_flags &
1923             (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
1924              KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
1925                 amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
1926         }
1927
1928         mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
1929         is_imported = mem->is_imported;
1930         mutex_unlock(&mem->lock);
1931         /* lock is not needed after this, since mem is unused and will
1932          * be freed anyway
1933          */
1934
1935         if (mapped_to_gpu_memory > 0) {
1936                 pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
1937                                 mem->va, bo_size);
1938                 return -EBUSY;
1939         }
1940
1941         /* Make sure restore workers don't access the BO any more */
1942         mutex_lock(&process_info->lock);
1943         list_del(&mem->validate_list);
1944         mutex_unlock(&process_info->lock);
1945
1946         /* Cleanup user pages and MMU notifiers */
1947         if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
1948                 amdgpu_hmm_unregister(mem->bo);
1949                 mutex_lock(&process_info->notifier_lock);
1950                 amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
1951                 mutex_unlock(&process_info->notifier_lock);
1952         }
1953
1954         ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
1955         if (unlikely(ret))
1956                 return ret;
1957
1958         amdgpu_amdkfd_remove_eviction_fence(mem->bo,
1959                                         process_info->eviction_fence);
1960         pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
1961                 mem->va + bo_size * (1 + mem->aql_queue));
1962
1963         /* Remove from VM internal data structures */
1964         list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
1965                 kfd_mem_dmaunmap_attachment(mem, entry);
1966                 kfd_mem_detach(entry);
1967         }
1968
1969         ret = unreserve_bo_and_vms(&ctx, false, false);
1970
1971         /* Free the sync object */
1972         amdgpu_sync_free(&mem->sync);
1973
1974         /* If the SG is not NULL, it's one we created for a doorbell or mmio
1975          * remap BO. We need to free it.
1976          */
1977         if (mem->bo->tbo.sg) {
1978                 sg_free_table(mem->bo->tbo.sg);
1979                 kfree(mem->bo->tbo.sg);
1980         }
1981
1982         /* Update the size of the BO being freed if it was allocated from
1983          * VRAM and is not imported. On APP APUs, VRAM allocations are
1984          * done in the GTT domain.
1985          */
1986         if (size) {
1987                 if (!is_imported &&
1988                    (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
1989                    ((adev->flags & AMD_IS_APU) &&
1990                     mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
1991                         *size = bo_size;
1992                 else
1993                         *size = 0;
1994         }
1995
1996         /* Free the BO */
1997         drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
1998         drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
1999         if (mem->dmabuf) {
2000                 dma_buf_put(mem->dmabuf);
2001                 mem->dmabuf = NULL;
2002         }
2003         mutex_destroy(&mem->lock);
2004
2005         /* If this releases the last reference, it will end up calling
2006          * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
2007          * this needs to be the last call here.
2008          */
2009         drm_gem_object_put(&mem->bo->tbo.base);
2010
2011         /*
2012          * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
2013          * explicitly free it here.
2014          */
2015         if (!use_release_notifier)
2016                 kfree(mem);
2017
2018         return ret;
2019 }
2020
2021 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
2022                 struct amdgpu_device *adev, struct kgd_mem *mem,
2023                 void *drm_priv)
2024 {
2025         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2026         int ret;
2027         struct amdgpu_bo *bo;
2028         uint32_t domain;
2029         struct kfd_mem_attachment *entry;
2030         struct bo_vm_reservation_context ctx;
2031         unsigned long bo_size;
2032         bool is_invalid_userptr = false;
2033
2034         bo = mem->bo;
2035         if (!bo) {
2036                 pr_err("Invalid BO when mapping memory to GPU\n");
2037                 return -EINVAL;
2038         }
2039
2040         /* Make sure restore is not running concurrently. Since we
2041          * don't map invalid userptr BOs, we rely on the next restore
2042          * worker to do the mapping
2043          */
2044         mutex_lock(&mem->process_info->lock);
2045
2046         /* Lock notifier lock. If we find an invalid userptr BO, we can be
2047          * sure that the MMU notifier is no longer running
2048          * concurrently and the queues are actually stopped
2049          */
2050         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2051                 mutex_lock(&mem->process_info->notifier_lock);
2052                 is_invalid_userptr = !!mem->invalid;
2053                 mutex_unlock(&mem->process_info->notifier_lock);
2054         }
2055
2056         mutex_lock(&mem->lock);
2057
2058         domain = mem->domain;
2059         bo_size = bo->tbo.base.size;
2060
2061         pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
2062                         mem->va,
2063                         mem->va + bo_size * (1 + mem->aql_queue),
2064                         avm, domain_string(domain));
2065
2066         if (!kfd_mem_is_attached(avm, mem)) {
2067                 ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
2068                 if (ret)
2069                         goto out;
2070         }
2071
2072         ret = reserve_bo_and_vm(mem, avm, &ctx);
2073         if (unlikely(ret))
2074                 goto out;
2075
2076         /* Userptr can be marked as "not invalid", but not actually be
2077          * validated yet (still in the system domain). In that case
2078          * the queues are still stopped and we can leave mapping for
2079          * the next restore worker
2080          */
2081         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
2082             bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
2083                 is_invalid_userptr = true;
2084
2085         ret = vm_validate_pt_pd_bos(avm, NULL);
2086         if (unlikely(ret))
2087                 goto out_unreserve;
2088
2089         list_for_each_entry(entry, &mem->attachments, list) {
2090                 if (entry->bo_va->base.vm != avm || entry->is_mapped)
2091                         continue;
2092
2093                 pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
2094                          entry->va, entry->va + bo_size, entry);
2095
2096                 ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
2097                                       is_invalid_userptr);
2098                 if (ret) {
2099                         pr_err("Failed to map bo to gpuvm\n");
2100                         goto out_unreserve;
2101                 }
2102
2103                 ret = vm_update_pds(avm, ctx.sync);
2104                 if (ret) {
2105                         pr_err("Failed to update page directories\n");
2106                         goto out_unreserve;
2107                 }
2108
2109                 entry->is_mapped = true;
2110                 mem->mapped_to_gpu_memory++;
2111                 pr_debug("\t INC mapping count %d\n",
2112                          mem->mapped_to_gpu_memory);
2113         }
2114
2115         ret = unreserve_bo_and_vms(&ctx, false, false);
2116
2117         goto out;
2118
2119 out_unreserve:
2120         unreserve_bo_and_vms(&ctx, false, false);
2121 out:
2122         mutex_unlock(&mem->process_info->lock);
2123         mutex_unlock(&mem->lock);
2124         return ret;
2125 }
2126
2127 int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
2128 {
2129         struct kfd_mem_attachment *entry;
2130         struct amdgpu_vm *vm;
2131         int ret;
2132
2133         vm = drm_priv_to_vm(drm_priv);
2134
2135         mutex_lock(&mem->lock);
2136
2137         ret = amdgpu_bo_reserve(mem->bo, true);
2138         if (ret)
2139                 goto out;
2140
2141         list_for_each_entry(entry, &mem->attachments, list) {
2142                 if (entry->bo_va->base.vm != vm)
2143                         continue;
2144                 if (entry->bo_va->base.bo->tbo.ttm &&
2145                     !entry->bo_va->base.bo->tbo.ttm->sg)
2146                         continue;
2147
2148                 kfd_mem_dmaunmap_attachment(mem, entry);
2149         }
2150
2151         amdgpu_bo_unreserve(mem->bo);
2152 out:
2153         mutex_unlock(&mem->lock);
2154
2155         return ret;
2156 }
2157
2158 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
2159                 struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
2160 {
2161         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2162         unsigned long bo_size = mem->bo->tbo.base.size;
2163         struct kfd_mem_attachment *entry;
2164         struct bo_vm_reservation_context ctx;
2165         int ret;
2166
2167         mutex_lock(&mem->lock);
2168
2169         ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
2170         if (unlikely(ret))
2171                 goto out;
2172         /* If no VMs were reserved, it means the BO wasn't actually mapped */
2173         if (ctx.n_vms == 0) {
2174                 ret = -EINVAL;
2175                 goto unreserve_out;
2176         }
2177
2178         ret = vm_validate_pt_pd_bos(avm, NULL);
2179         if (unlikely(ret))
2180                 goto unreserve_out;
2181
2182         pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
2183                 mem->va,
2184                 mem->va + bo_size * (1 + mem->aql_queue),
2185                 avm);
2186
2187         list_for_each_entry(entry, &mem->attachments, list) {
2188                 if (entry->bo_va->base.vm != avm || !entry->is_mapped)
2189                         continue;
2190
2191                 pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
2192                          entry->va, entry->va + bo_size, entry);
2193
2194                 unmap_bo_from_gpuvm(mem, entry, ctx.sync);
2195                 entry->is_mapped = false;
2196
2197                 mem->mapped_to_gpu_memory--;
2198                 pr_debug("\t DEC mapping count %d\n",
2199                          mem->mapped_to_gpu_memory);
2200         }
2201
2202 unreserve_out:
2203         unreserve_bo_and_vms(&ctx, false, false);
2204 out:
2205         mutex_unlock(&mem->lock);
2206         return ret;
2207 }
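
/*
 * Illustrative sketch (not part of the driver): the KFD ioctl layer drives
 * the typical per-GPU lifecycle of a BO roughly as follows (error handling
 * and locking omitted).
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
 *						&mem, &offset, flags, false);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, NULL);
 */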
2208
2209 int amdgpu_amdkfd_gpuvm_sync_memory(
2210                 struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
2211 {
2212         struct amdgpu_sync sync;
2213         int ret;
2214
2215         amdgpu_sync_create(&sync);
2216
2217         mutex_lock(&mem->lock);
2218         amdgpu_sync_clone(&mem->sync, &sync);
2219         mutex_unlock(&mem->lock);
2220
2221         ret = amdgpu_sync_wait(&sync, intr);
2222         amdgpu_sync_free(&sync);
2223         return ret;
2224 }
2225
2226 /**
2227  * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
2228  * @bo: Buffer object to be mapped
2229  *
2230  * Before return, bo reference count is incremented. To release the reference and unpin/
2231  * unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
2232  */
2233 int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
2234 {
2235         int ret;
2236
2237         ret = amdgpu_bo_reserve(bo, true);
2238         if (ret) {
2239                 pr_err("Failed to reserve bo. ret %d\n", ret);
2240                 goto err_reserve_bo_failed;
2241         }
2242
2243         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2244         if (ret) {
2245                 pr_err("Failed to pin bo. ret %d\n", ret);
2246                 goto err_pin_bo_failed;
2247         }
2248
2249         ret = amdgpu_ttm_alloc_gart(&bo->tbo);
2250         if (ret) {
2251                 pr_err("Failed to bind bo to GART. ret %d\n", ret);
2252                 goto err_map_bo_gart_failed;
2253         }
2254
2255         amdgpu_amdkfd_remove_eviction_fence(
2256                 bo, bo->vm_bo->vm->process_info->eviction_fence);
2257
2258         amdgpu_bo_unreserve(bo);
2259
2260         bo = amdgpu_bo_ref(bo);
2261
2262         return 0;
2263
2264 err_map_bo_gart_failed:
2265         amdgpu_bo_unpin(bo);
2266 err_pin_bo_failed:
2267         amdgpu_bo_unreserve(bo);
2268 err_reserve_bo_failed:
2269
2270         return ret;
2271 }
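
/*
 * Illustrative sketch (not part of the driver): after a successful call the
 * BO is pinned and GART-bound, so its GPU address can be read back; the
 * extra reference is dropped later via amdgpu_amdkfd_free_gtt_mem().
 *
 *	ret = amdgpu_amdkfd_map_gtt_bo_to_gart(bo);
 *	if (!ret)
 *		gart_addr = amdgpu_bo_gpu_offset(bo);
 */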
2272
2273 /** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
2274  *
2275  * @mem: Buffer object to be mapped for CPU access
2276  * @kptr[out]: pointer in kernel CPU address space
2277  * @size[out]: size of the buffer
2278  *
2279  * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
2280  * from the BO, since pinned BOs cannot be evicted. The bo must remain on the
2281  * validate_list, so the GPU mapping can be restored after a page table was
2282  * evicted.
2283  *
2284  * Return: 0 on success, error code on failure
2285  */
2286 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
2287                                              void **kptr, uint64_t *size)
2288 {
2289         int ret;
2290         struct amdgpu_bo *bo = mem->bo;
2291
2292         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
2293                 pr_err("userptr can't be mapped to kernel\n");
2294                 return -EINVAL;
2295         }
2296
2297         mutex_lock(&mem->process_info->lock);
2298
2299         ret = amdgpu_bo_reserve(bo, true);
2300         if (ret) {
2301                 pr_err("Failed to reserve bo. ret %d\n", ret);
2302                 goto bo_reserve_failed;
2303         }
2304
2305         ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
2306         if (ret) {
2307                 pr_err("Failed to pin bo. ret %d\n", ret);
2308                 goto pin_failed;
2309         }
2310
2311         ret = amdgpu_bo_kmap(bo, kptr);
2312         if (ret) {
2313                 pr_err("Failed to map bo to kernel. ret %d\n", ret);
2314                 goto kmap_failed;
2315         }
2316
2317         amdgpu_amdkfd_remove_eviction_fence(
2318                 bo, mem->process_info->eviction_fence);
2319
2320         if (size)
2321                 *size = amdgpu_bo_size(bo);
2322
2323         amdgpu_bo_unreserve(bo);
2324
2325         mutex_unlock(&mem->process_info->lock);
2326         return 0;
2327
2328 kmap_failed:
2329         amdgpu_bo_unpin(bo);
2330 pin_failed:
2331         amdgpu_bo_unreserve(bo);
2332 bo_reserve_failed:
2333         mutex_unlock(&mem->process_info->lock);
2334
2335         return ret;
2336 }
2337
2338 /** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
2339  *
2340  * @mem: Buffer object to be unmapped for CPU access
2341  *
2342  * Removes the kernel CPU mapping and unpins the BO. It does not restore the
2343  * eviction fence, so this function should only be used for cleanup before the
2344  * BO is destroyed.
2345  */
2346 void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
2347 {
2348         struct amdgpu_bo *bo = mem->bo;
2349
2350         amdgpu_bo_reserve(bo, true);
2351         amdgpu_bo_kunmap(bo);
2352         amdgpu_bo_unpin(bo);
2353         amdgpu_bo_unreserve(bo);
2354 }
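
/*
 * Illustrative sketch (not part of the driver): mapping a GTT BO for kernel
 * CPU access and tearing the mapping down again.
 *
 *	void *cpu_ptr;
 *	uint64_t size;
 *
 *	ret = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(mem, &cpu_ptr, &size);
 *	if (!ret) {
 *		memset(cpu_ptr, 0, size);
 *		amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
 *	}
 */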
2355
2356 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
2357                                           struct kfd_vm_fault_info *mem)
2358 {
2359         if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
2360                 *mem = *adev->gmc.vm_fault_info;
2361                 mb(); /* make sure read happened */
2362                 atomic_set(&adev->gmc.vm_fault_info_updated, 0);
2363         }
2364         return 0;
2365 }
2366
2367 static int import_obj_create(struct amdgpu_device *adev,
2368                              struct dma_buf *dma_buf,
2369                              struct drm_gem_object *obj,
2370                              uint64_t va, void *drm_priv,
2371                              struct kgd_mem **mem, uint64_t *size,
2372                              uint64_t *mmap_offset)
2373 {
2374         struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
2375         struct amdgpu_bo *bo;
2376         int ret;
2377
2378         bo = gem_to_amdgpu_bo(obj);
2379         if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
2380                                     AMDGPU_GEM_DOMAIN_GTT)))
2381                 /* Only VRAM and GTT BOs are supported */
2382                 return -EINVAL;
2383
2384         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
2385         if (!*mem)
2386                 return -ENOMEM;
2387
2388         ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
2389         if (ret)
2390                 goto err_free_mem;
2391
2392         if (size)
2393                 *size = amdgpu_bo_size(bo);
2394
2395         if (mmap_offset)
2396                 *mmap_offset = amdgpu_bo_mmap_offset(bo);
2397
2398         INIT_LIST_HEAD(&(*mem)->attachments);
2399         mutex_init(&(*mem)->lock);
2400
2401         (*mem)->alloc_flags =
2402                 ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
2403                 KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
2404                 | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
2405                 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
2406
2407         get_dma_buf(dma_buf);
2408         (*mem)->dmabuf = dma_buf;
2409         (*mem)->bo = bo;
2410         (*mem)->va = va;
2411         (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
2412                          !(adev->flags & AMD_IS_APU) ?
2413                          AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
2414
2415         (*mem)->mapped_to_gpu_memory = 0;
2416         (*mem)->process_info = avm->process_info;
2417         add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
2418         amdgpu_sync_create(&(*mem)->sync);
2419         (*mem)->is_imported = true;
2420
2421         mutex_lock(&avm->process_info->lock);
2422         if (avm->process_info->eviction_fence &&
2423             !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
2424                 ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
2425                                 &avm->process_info->eviction_fence->base);
2426         mutex_unlock(&avm->process_info->lock);
2427         if (ret)
2428                 goto err_remove_mem;
2429
2430         return 0;
2431
2432 err_remove_mem:
2433         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
2434         drm_vma_node_revoke(&obj->vma_node, drm_priv);
2435 err_free_mem:
2436         kfree(*mem);
2437         return ret;
2438 }
2439
2440 int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
2441                                          uint64_t va, void *drm_priv,
2442                                          struct kgd_mem **mem, uint64_t *size,
2443                                          uint64_t *mmap_offset)
2444 {
2445         struct drm_gem_object *obj;
2446         uint32_t handle;
2447         int ret;
2448
2449         ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
2450                                          &handle);
2451         if (ret)
2452                 return ret;
2453         obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
2454         if (!obj) {
2455                 ret = -EINVAL;
2456                 goto err_release_handle;
2457         }
2458
2459         ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
2460                                 mmap_offset);
2461         if (ret)
2462                 goto err_put_obj;
2463
2464         (*mem)->gem_handle = handle;
2465
2466         return 0;
2467
2468 err_put_obj:
2469         drm_gem_object_put(obj);
2470 err_release_handle:
2471         drm_gem_handle_delete(adev->kfd.client.file, handle);
2472         return ret;
2473 }
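
/*
 * Illustrative sketch (not part of the driver; variable names are
 * placeholders): importing a dma-buf fd into this process VM and then
 * mapping it like any other KFD BO.
 *
 *	ret = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(adev, fd, va, drm_priv,
 *						   &mem, &size, &mmap_offset);
 *	if (!ret)
 *		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 */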
2474
2475 int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
2476                                       struct dma_buf **dma_buf)
2477 {
2478         int ret;
2479
2480         mutex_lock(&mem->lock);
2481         ret = kfd_mem_export_dmabuf(mem);
2482         if (ret)
2483                 goto out;
2484
2485         get_dma_buf(mem->dmabuf);
2486         *dma_buf = mem->dmabuf;
2487 out:
2488         mutex_unlock(&mem->lock);
2489         return ret;
2490 }
2491
2492 /* Evict a userptr BO by stopping the queues if necessary
2493  *
2494  * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
2495  * cannot do any memory allocations, and cannot take any locks that
2496  * are held elsewhere while allocating memory.
2497  *
2498  * It doesn't do anything to the BO itself. The real work happens in
2499  * restore, where we get updated page addresses. This function only
2500  * ensures that GPU access to the BO is stopped.
2501  */
2502 int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
2503                                 unsigned long cur_seq, struct kgd_mem *mem)
2504 {
2505         struct amdkfd_process_info *process_info = mem->process_info;
2506         int r = 0;
2507
2508         /* Do not process MMU notifications during CRIU restore until
2509          * KFD_CRIU_OP_RESUME IOCTL is received
2510          */
2511         if (READ_ONCE(process_info->block_mmu_notifications))
2512                 return 0;
2513
2514         mutex_lock(&process_info->notifier_lock);
2515         mmu_interval_set_seq(mni, cur_seq);
2516
2517         mem->invalid++;
2518         if (++process_info->evicted_bos == 1) {
2519                 /* First eviction, stop the queues */
2520                 r = kgd2kfd_quiesce_mm(mni->mm,
2521                                        KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
2522                 if (r)
2523                         pr_err("Failed to quiesce KFD\n");
2524                 queue_delayed_work(system_freezable_wq,
2525                         &process_info->restore_userptr_work,
2526                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2527         }
2528         mutex_unlock(&process_info->notifier_lock);
2529
2530         return r;
2531 }
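
/*
 * Userptr eviction/restore flow, informally (summary of the helpers below):
 *
 *	MMU notifier invalidation
 *	  -> amdgpu_amdkfd_evict_userptr(): mark the BO invalid, quiesce the
 *	     user queues on the first eviction, schedule restore_userptr_work
 *	  -> amdgpu_amdkfd_restore_userptr_worker():
 *	       update_invalid_user_pages()        get updated user pages
 *	       validate_invalid_user_pages()      re-validate BOs, update PTEs
 *	       confirm_valid_user_pages_locked()  move BOs back to valid list
 *	       kgd2kfd_resume_mm()                restart the user queues
 */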
2532
2533 /* Update invalid userptr BOs
2534  *
2535  * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
2536  * userptr_inval_list and updates user pages for all BOs that have
2537  * been invalidated since their last update.
2538  */
2539 static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
2540                                      struct mm_struct *mm)
2541 {
2542         struct kgd_mem *mem, *tmp_mem;
2543         struct amdgpu_bo *bo;
2544         struct ttm_operation_ctx ctx = { false, false };
2545         uint32_t invalid;
2546         int ret = 0;
2547
2548         mutex_lock(&process_info->notifier_lock);
2549
2550         /* Move all invalidated BOs to the userptr_inval_list */
2551         list_for_each_entry_safe(mem, tmp_mem,
2552                                  &process_info->userptr_valid_list,
2553                                  validate_list)
2554                 if (mem->invalid)
2555                         list_move_tail(&mem->validate_list,
2556                                        &process_info->userptr_inval_list);
2557
2558         /* Go through userptr_inval_list and update any invalid user_pages */
2559         list_for_each_entry(mem, &process_info->userptr_inval_list,
2560                             validate_list) {
2561                 invalid = mem->invalid;
2562                 if (!invalid)
2563                         /* BO hasn't been invalidated since the last
2564                          * revalidation attempt. Keep its page list.
2565                          */
2566                         continue;
2567
2568                 bo = mem->bo;
2569
2570                 amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
2571                 mem->range = NULL;
2572
2573                 /* BO reservations and getting user pages (hmm_range_fault)
2574                  * must happen outside the notifier lock
2575                  */
2576                 mutex_unlock(&process_info->notifier_lock);
2577
2578                 /* Move the BO to system (CPU) domain if necessary to unmap
2579                  * and free the SG table
2580                  */
2581                 if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
2582                         if (amdgpu_bo_reserve(bo, true))
2583                                 return -EAGAIN;
2584                         amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
2585                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2586                         amdgpu_bo_unreserve(bo);
2587                         if (ret) {
2588                                 pr_err("%s: Failed to invalidate userptr BO\n",
2589                                        __func__);
2590                                 return -EAGAIN;
2591                         }
2592                 }
2593
2594                 /* Get updated user pages */
2595                 ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
2596                                                    &mem->range);
2597                 if (ret) {
2598                         pr_debug("Failed %d to get user pages\n", ret);
2599
2600                         /* Treat the -EFAULT bad address error as success. It will
2601                          * fail later with a VM fault if the GPU tries to access
2602                          * it. Better than hanging indefinitely with stalled
2603                          * user mode queues.
2604                          *
2605                          * Return other errors (-EBUSY or -ENOMEM) to retry the restore.
2606                          */
2607                         if (ret != -EFAULT)
2608                                 return ret;
2609
2610                         ret = 0;
2611                 }
2612
2613                 mutex_lock(&process_info->notifier_lock);
2614
2615                 /* Mark the BO as valid unless it was invalidated
2616                  * again concurrently.
2617                  */
2618                 if (mem->invalid != invalid) {
2619                         ret = -EAGAIN;
2620                         goto unlock_out;
2621                 }
2622                  /* set mem valid if mem has hmm range associated */
2623                 if (mem->range)
2624                         mem->invalid = 0;
2625         }
2626
2627 unlock_out:
2628         mutex_unlock(&process_info->notifier_lock);
2629
2630         return ret;
2631 }
2632
2633 /* Validate invalid userptr BOs
2634  *
2635  * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
2636  * with new page addresses and waits for the page table updates to complete.
2637  */
2638 static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
2639 {
2640         struct ttm_operation_ctx ctx = { false, false };
2641         struct amdgpu_sync sync;
2642         struct drm_exec exec;
2643
2644         struct amdgpu_vm *peer_vm;
2645         struct kgd_mem *mem, *tmp_mem;
2646         struct amdgpu_bo *bo;
2647         int ret;
2648
2649         amdgpu_sync_create(&sync);
2650
2651         drm_exec_init(&exec, 0, 0);
2652         /* Reserve all BOs and page tables for validation */
2653         drm_exec_until_all_locked(&exec) {
2654                 /* Reserve all the page directories */
2655                 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2656                                     vm_list_node) {
2657                         ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2658                         drm_exec_retry_on_contention(&exec);
2659                         if (unlikely(ret))
2660                                 goto unreserve_out;
2661                 }
2662
2663                 /* Reserve the userptr_inval_list entries to resv_list */
2664                 list_for_each_entry(mem, &process_info->userptr_inval_list,
2665                                     validate_list) {
2666                         struct drm_gem_object *gobj;
2667
2668                         gobj = &mem->bo->tbo.base;
2669                         ret = drm_exec_prepare_obj(&exec, gobj, 1);
2670                         drm_exec_retry_on_contention(&exec);
2671                         if (unlikely(ret))
2672                                 goto unreserve_out;
2673                 }
2674         }
2675
2676         ret = process_validate_vms(process_info, NULL);
2677         if (ret)
2678                 goto unreserve_out;
2679
2680         /* Validate BOs and update GPUVM page tables */
2681         list_for_each_entry_safe(mem, tmp_mem,
2682                                  &process_info->userptr_inval_list,
2683                                  validate_list) {
2684                 struct kfd_mem_attachment *attachment;
2685
2686                 bo = mem->bo;
2687
2688                 /* Validate the BO if we got user pages */
2689                 if (bo->tbo.ttm->pages[0]) {
2690                         amdgpu_bo_placement_from_domain(bo, mem->domain);
2691                         ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
2692                         if (ret) {
2693                                 pr_err("%s: failed to validate BO\n", __func__);
2694                                 goto unreserve_out;
2695                         }
2696                 }
2697
2698                 /* Update mapping. If the BO was not validated
2699                  * (because we couldn't get user pages), this will
2700                  * clear the page table entries, which will result in
2701                  * VM faults if the GPU tries to access the invalid
2702                  * memory.
2703                  */
2704                 list_for_each_entry(attachment, &mem->attachments, list) {
2705                         if (!attachment->is_mapped)
2706                                 continue;
2707
2708                         kfd_mem_dmaunmap_attachment(mem, attachment);
2709                         ret = update_gpuvm_pte(mem, attachment, &sync);
2710                         if (ret) {
2711                                 pr_err("%s: update PTE failed\n", __func__);
2712                                 /* make sure this gets validated again */
2713                                 mutex_lock(&process_info->notifier_lock);
2714                                 mem->invalid++;
2715                                 mutex_unlock(&process_info->notifier_lock);
2716                                 goto unreserve_out;
2717                         }
2718                 }
2719         }
2720
2721         /* Update page directories */
2722         ret = process_update_pds(process_info, &sync);
2723
2724 unreserve_out:
2725         drm_exec_fini(&exec);
2726         amdgpu_sync_wait(&sync, false);
2727         amdgpu_sync_free(&sync);
2728
2729         return ret;
2730 }
2731
2732 /* Confirm that all user pages are valid while holding the notifier lock
2733  *
2734  * Moves valid BOs from the userptr_inval_list back to the userptr_valid_list.
2735  */
2736 static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
2737 {
2738         struct kgd_mem *mem, *tmp_mem;
2739         int ret = 0;
2740
2741         list_for_each_entry_safe(mem, tmp_mem,
2742                                  &process_info->userptr_inval_list,
2743                                  validate_list) {
2744                 bool valid;
2745
2746                 /* keep mem without hmm range at userptr_inval_list */
2747                 if (!mem->range)
2748                         continue;
2749
2750                 /* Only check mem with hmm range associated */
2751                 valid = amdgpu_ttm_tt_get_user_pages_done(
2752                                         mem->bo->tbo.ttm, mem->range);
2753
2754                 mem->range = NULL;
2755                 if (!valid) {
2756                         WARN(!mem->invalid, "Invalid BO not marked invalid");
2757                         ret = -EAGAIN;
2758                         continue;
2759                 }
2760
2761                 if (mem->invalid) {
2762                         WARN(1, "Valid BO is marked invalid");
2763                         ret = -EAGAIN;
2764                         continue;
2765                 }
2766
2767                 list_move_tail(&mem->validate_list,
2768                                &process_info->userptr_valid_list);
2769         }
2770
2771         return ret;
2772 }
2773
2774 /* Worker callback to restore evicted userptr BOs
2775  *
2776  * Tries to update and validate all userptr BOs. If successful and no
2777  * concurrent evictions happened, the queues are restarted. Otherwise,
2778  * reschedule for another attempt later.
2779  */
2780 static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
2781 {
2782         struct delayed_work *dwork = to_delayed_work(work);
2783         struct amdkfd_process_info *process_info =
2784                 container_of(dwork, struct amdkfd_process_info,
2785                              restore_userptr_work);
2786         struct task_struct *usertask;
2787         struct mm_struct *mm;
2788         uint32_t evicted_bos;
2789
2790         mutex_lock(&process_info->notifier_lock);
2791         evicted_bos = process_info->evicted_bos;
2792         mutex_unlock(&process_info->notifier_lock);
2793         if (!evicted_bos)
2794                 return;
2795
2796         /* Reference task and mm in case of concurrent process termination */
2797         usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
2798         if (!usertask)
2799                 return;
2800         mm = get_task_mm(usertask);
2801         if (!mm) {
2802                 put_task_struct(usertask);
2803                 return;
2804         }
2805
2806         mutex_lock(&process_info->lock);
2807
2808         if (update_invalid_user_pages(process_info, mm))
2809                 goto unlock_out;
2810         /* userptr_inval_list can be empty if all evicted userptr BOs
2811          * have been freed. In that case there is nothing to validate
2812          * and we can just restart the queues.
2813          */
2814         if (!list_empty(&process_info->userptr_inval_list)) {
2815                 if (validate_invalid_user_pages(process_info))
2816                         goto unlock_out;
2817         }
2818         /* Final check for concurrent eviction and atomic update. If
2819          * another eviction happens after a successful update, it will
2820          * count as a first eviction that calls quiesce_mm. The eviction
2821          * reference counting inside KFD will handle this case.
2822          */
2823         mutex_lock(&process_info->notifier_lock);
2824         if (process_info->evicted_bos != evicted_bos)
2825                 goto unlock_notifier_out;
2826
2827         if (confirm_valid_user_pages_locked(process_info)) {
2828                 WARN(1, "User pages unexpectedly invalid");
2829                 goto unlock_notifier_out;
2830         }
2831
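             /* Clear both the per-process counter and the local copy so that
              * the reschedule check at the end of this function sees success.
              */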
2832         process_info->evicted_bos = evicted_bos = 0;
2833
2834         if (kgd2kfd_resume_mm(mm)) {
2835                 pr_err("%s: Failed to resume KFD\n", __func__);
2836                 /* No recovery from this failure. Probably the CP is
2837                  * hanging. No point trying again.
2838                  */
2839         }
2840
2841 unlock_notifier_out:
2842         mutex_unlock(&process_info->notifier_lock);
2843 unlock_out:
2844         mutex_unlock(&process_info->lock);
2845
2846         /* If validation failed, reschedule another attempt */
2847         if (evicted_bos) {
2848                 queue_delayed_work(system_freezable_wq,
2849                         &process_info->restore_userptr_work,
2850                         msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
2851
2852                 kfd_smi_event_queue_restore_rescheduled(mm);
2853         }
2854         mmput(mm);
2855         put_task_struct(usertask);
2856 }
2857
2858 static void replace_eviction_fence(struct dma_fence __rcu **ef,
2859                                    struct dma_fence *new_ef)
2860 {
2861         struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
2862                 /* protected by process_info->lock */);
2863
2864         /* If we're replacing an unsignaled eviction fence, that fence will
2865          * never be signaled, and if anyone is still waiting on that fence,
2866          * they will hang forever. This should never happen. We should only
2867  * replace the fence in restore_work, which only gets scheduled after
2868  * the eviction work has signaled the fence.
2869          */
2870         WARN_ONCE(!dma_fence_is_signaled(old_ef),
2871                   "Replacing unsignaled eviction fence");
2872         dma_fence_put(old_ef);
2873 }
2874
2875 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
2876  *   KFD process identified by process_info
2877  *
2878  * @process_info: amdkfd_process_info of the KFD process
2879  *
2880  * After memory eviction, the restore thread calls this function. The function
2881  * should be called while the process is still valid. BO restore involves:
2882  *
2883  * 1.  Release old eviction fence and create new one
2884  * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
2885  * 3.  Use the second PD list and kfd_bo_list to build the set of BOs that
2886  *     need to be reserved.
2887  * 4.  Reserve all the BOs
2888  * 5.  Validate the PD and PT BOs.
2889  * 6.  Validate all KFD BOs using kfd_bo_list, map them and add the new fence
2890  * 7.  Add fence to all PD and PT BOs.
2891  * 8.  Unreserve all BOs
2892  */
2893 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
2894 {
2895         struct amdkfd_process_info *process_info = info;
2896         struct amdgpu_vm *peer_vm;
2897         struct kgd_mem *mem;
2898         struct list_head duplicate_save;
2899         struct amdgpu_sync sync_obj;
2900         unsigned long failed_size = 0;
2901         unsigned long total_size = 0;
2902         struct drm_exec exec;
2903         int ret;
2904
2905         INIT_LIST_HEAD(&duplicate_save);
2906
2907         mutex_lock(&process_info->lock);
2908
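             /* Lock the root PDs of all VMs and every BO on kfd_bo_list in a
              * single drm_exec transaction; contention restarts the loop.
              */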
2909         drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
2910         drm_exec_until_all_locked(&exec) {
2911                 list_for_each_entry(peer_vm, &process_info->vm_list_head,
2912                                     vm_list_node) {
2913                         ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
2914                         drm_exec_retry_on_contention(&exec);
2915                         if (unlikely(ret)) {
2916                                 pr_err("Locking VM PD failed, ret: %d\n", ret);
2917                                 goto ttm_reserve_fail;
2918                         }
2919                 }
2920
2921                 /* Reserve all BOs and page tables/directories. Add all BOs
2922                  * from kfd_bo_list to the drm_exec reservation.
2923                  */
2924                 list_for_each_entry(mem, &process_info->kfd_bo_list,
2925                                     validate_list) {
2926                         struct drm_gem_object *gobj;
2927
2928                         gobj = &mem->bo->tbo.base;
2929                         ret = drm_exec_prepare_obj(&exec, gobj, 1);
2930                         drm_exec_retry_on_contention(&exec);
2931                         if (unlikely(ret)) {
2932                                 pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
2933                                 goto ttm_reserve_fail;
2934                         }
2935                 }
2936         }
2937
2938         amdgpu_sync_create(&sync_obj);
2939
2940         /* Validate BOs managed by KFD */
2941         list_for_each_entry(mem, &process_info->kfd_bo_list,
2942                             validate_list) {
2943
2944                 struct amdgpu_bo *bo = mem->bo;
2945                 uint32_t domain = mem->domain;
2946                 struct dma_resv_iter cursor;
2947                 struct dma_fence *fence;
2948
2949                 total_size += amdgpu_bo_size(bo);
2950
2951                 ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
2952                 if (ret) {
2953                         pr_debug("Memory eviction: Validate BOs failed\n");
2954                         failed_size += amdgpu_bo_size(bo);
2955                         ret = amdgpu_amdkfd_bo_validate(bo,
2956                                                 AMDGPU_GEM_DOMAIN_GTT, false);
2957                         if (ret) {
2958                                 pr_debug("Memory eviction: Try again\n");
2959                                 goto validate_map_fail;
2960                         }
2961                 }
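                     /* Collect the kernel fences of the validated BO (e.g. its
                      * move fence) so that amdgpu_sync_wait() below waits for
                      * them to complete.
                      */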
2962                 dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
2963                                         DMA_RESV_USAGE_KERNEL, fence) {
2964                         ret = amdgpu_sync_fence(&sync_obj, fence);
2965                         if (ret) {
2966                                 pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
2967                                 goto validate_map_fail;
2968                         }
2969                 }
2970         }
2971
2972         if (failed_size)
2973                 pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
2974
2975         /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
2976          * validations above would invalidate DMABuf imports again.
2977          */
2978         ret = process_validate_vms(process_info, &exec.ticket);
2979         if (ret) {
2980                 pr_debug("Validating VMs failed, ret: %d\n", ret);
2981                 goto validate_map_fail;
2982         }
2983
2984         /* Update mappings managed by KFD. */
2985         list_for_each_entry(mem, &process_info->kfd_bo_list,
2986                             validate_list) {
2987                 struct kfd_mem_attachment *attachment;
2988
2989                 list_for_each_entry(attachment, &mem->attachments, list) {
2990                         if (!attachment->is_mapped)
2991                                 continue;
2992
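                             /* Tear down the stale DMA mapping and rebuild the
                              * GPU page-table entries for this attachment.
                              */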
2993                         kfd_mem_dmaunmap_attachment(mem, attachment);
2994                         ret = update_gpuvm_pte(mem, attachment, &sync_obj);
2995                         if (ret) {
2996                                 pr_debug("Memory eviction: update PTE failed. Try again\n");
2997                                 goto validate_map_fail;
2998                         }
2999                 }
3000         }
3001
3002         /* Update mappings not managed by KFD */
3003         list_for_each_entry(peer_vm, &process_info->vm_list_head,
3004                         vm_list_node) {
3005                 struct amdgpu_device *adev = amdgpu_ttm_adev(
3006                         peer_vm->root.bo->tbo.bdev);
3007
3008                 ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
3009                 if (ret) {
3010                         pr_debug("Memory eviction: handle moved failed. Try again\n");
3011                         goto validate_map_fail;
3012                 }
3013         }
3014
3015         /* Update page directories */
3016         ret = process_update_pds(process_info, &sync_obj);
3017         if (ret) {
3018                 pr_debug("Memory eviction: update PDs failed. Try again\n");
3019                 goto validate_map_fail;
3020         }
3021
3022         /* Sync with fences on all the page tables. They implicitly depend on any
3023          * move fences from amdgpu_vm_handle_moved above.
3024          */
3025         ret = process_sync_pds_resv(process_info, &sync_obj);
3026         if (ret) {
3027                 pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
3028                 goto validate_map_fail;
3029         }
3030
3031         /* Wait for validate and PT updates to finish */
3032         amdgpu_sync_wait(&sync_obj, false);
3033
3034         /* The old eviction fence may be unsignaled if restore happens
3035          * after a GPU reset or suspend/resume. Keep the old fence in that
3036          * case. Otherwise release the old eviction fence and create a new
3037          * one, because a fence only goes from unsignaled to signaled once
3038          * and cannot be reused. Use the context and mm from the old fence.
3039          *
3040          * If an old eviction fence signals after this check, that's OK.
3041          * Anyone signaling an eviction fence must stop the queues first
3042          * and schedule another restore worker.
3043          */
3044         if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
3045                 struct amdgpu_amdkfd_fence *new_fence =
3046                         amdgpu_amdkfd_fence_create(
3047                                 process_info->eviction_fence->base.context,
3048                                 process_info->eviction_fence->mm,
3049                                 NULL);
3050
3051                 if (!new_fence) {
3052                         pr_err("Failed to create eviction fence\n");
3053                         ret = -ENOMEM;
3054                         goto validate_map_fail;
3055                 }
3056                 dma_fence_put(&process_info->eviction_fence->base);
3057                 process_info->eviction_fence = new_fence;
3058                 replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
3059         } else {
3060                 WARN_ONCE(*ef != &process_info->eviction_fence->base,
3061                           "KFD eviction fence doesn't match KGD process_info");
3062         }
3063
3064         /* Attach new eviction fence to all BOs except pinned ones */
3065         list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
3066                 if (mem->bo->tbo.pin_count)
3067                         continue;
3068
3069                 dma_resv_add_fence(mem->bo->tbo.base.resv,
3070                                    &process_info->eviction_fence->base,
3071                                    DMA_RESV_USAGE_BOOKKEEP);
3072         }
3073         /* Attach eviction fence to PD / PT BOs and DMABuf imports */
3074         list_for_each_entry(peer_vm, &process_info->vm_list_head,
3075                             vm_list_node) {
3076                 struct amdgpu_bo *bo = peer_vm->root.bo;
3077
3078                 dma_resv_add_fence(bo->tbo.base.resv,
3079                                    &process_info->eviction_fence->base,
3080                                    DMA_RESV_USAGE_BOOKKEEP);
3081         }
3082
3083 validate_map_fail:
3084         amdgpu_sync_free(&sync_obj);
3085 ttm_reserve_fail:
3086         drm_exec_fini(&exec);
3087         mutex_unlock(&process_info->lock);
3088         return ret;
3089 }
3090
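     /* Wrap the amdgpu GWS BO in a kgd_mem, add it to the process BO list and
      * attach the process eviction fence so amdgpu and amdkfd can evict each
      * other.
      */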
3091 int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
3092 {
3093         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
3094         struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
3095         int ret;
3096
3097         if (!info || !gws)
3098                 return -EINVAL;
3099
3100         *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
3101         if (!*mem)
3102                 return -ENOMEM;
3103
3104         mutex_init(&(*mem)->lock);
3105         INIT_LIST_HEAD(&(*mem)->attachments);
3106         (*mem)->bo = amdgpu_bo_ref(gws_bo);
3107         (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
3108         (*mem)->process_info = process_info;
3109         add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
3110         amdgpu_sync_create(&(*mem)->sync);
3111
3112
3113         /* Validate the GWS BO the first time it is added to the process */
3114         mutex_lock(&(*mem)->process_info->lock);
3115         ret = amdgpu_bo_reserve(gws_bo, false);
3116         if (unlikely(ret)) {
3117                 pr_err("Reserve gws bo failed %d\n", ret);
3118                 goto bo_reservation_failure;
3119         }
3120
3121         ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
3122         if (ret) {
3123                 pr_err("GWS BO validate failed %d\n", ret);
3124                 goto bo_validation_failure;
3125         }
3126         /* The GWS resource is shared between amdgpu and amdkfd.
3127          * Add the process eviction fence to the BO so they can
3128          * evict each other.
3129          */
3130         ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
3131         if (ret)
3132                 goto reserve_shared_fail;
3133         dma_resv_add_fence(gws_bo->tbo.base.resv,
3134                            &process_info->eviction_fence->base,
3135                            DMA_RESV_USAGE_BOOKKEEP);
3136         amdgpu_bo_unreserve(gws_bo);
3137         mutex_unlock(&(*mem)->process_info->lock);
3138
3139         return ret;
3140
3141 reserve_shared_fail:
3142 bo_validation_failure:
3143         amdgpu_bo_unreserve(gws_bo);
3144 bo_reservation_failure:
3145         mutex_unlock(&(*mem)->process_info->lock);
3146         amdgpu_sync_free(&(*mem)->sync);
3147         remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
3148         amdgpu_bo_unref(&gws_bo);
3149         mutex_destroy(&(*mem)->lock);
3150         kfree(*mem);
3151         *mem = NULL;
3152         return ret;
3153 }
3154
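     /* Undo amdgpu_amdkfd_add_gws_to_process: remove the eviction fence and
      * drop the references taken when the GWS BO was added to the process.
      */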
3155 int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
3156 {
3157         int ret;
3158         struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
3159         struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
3160         struct amdgpu_bo *gws_bo = kgd_mem->bo;
3161
3162         /* Remove BO from process's validate list so restore worker won't touch
3163          * it anymore
3164          */
3165         remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
3166
3167         ret = amdgpu_bo_reserve(gws_bo, false);
3168         if (unlikely(ret)) {
3169                 pr_err("Reserve gws bo failed %d\n", ret);
3170                 //TODO add BO back to validate_list?
3171                 return ret;
3172         }
3173         amdgpu_amdkfd_remove_eviction_fence(gws_bo,
3174                         process_info->eviction_fence);
3175         amdgpu_bo_unreserve(gws_bo);
3176         amdgpu_sync_free(&kgd_mem->sync);
3177         amdgpu_bo_unref(&gws_bo);
3178         mutex_destroy(&kgd_mem->lock);
3179         kfree(mem);
3180         return 0;
3181 }
3182
3183 /* Returns GPU-specific tiling mode information */
3184 int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
3185                                 struct tile_config *config)
3186 {
3187         config->gb_addr_config = adev->gfx.config.gb_addr_config;
3188         config->tile_config_ptr = adev->gfx.config.tile_mode_array;
3189         config->num_tile_configs =
3190                         ARRAY_SIZE(adev->gfx.config.tile_mode_array);
3191         config->macro_tile_config_ptr =
3192                         adev->gfx.config.macrotile_mode_array;
3193         config->num_macro_tile_configs =
3194                         ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
3195
3196         /* Those values are not set from GFX9 onwards */
3197         config->num_banks = adev->gfx.config.num_banks;
3198         config->num_ranks = adev->gfx.config.num_ranks;
3199
3200         return 0;
3201 }
3202
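     /* Return true if the BO has at least one active mapping on the given device */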
3203 bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem)
3204 {
3205         struct kfd_mem_attachment *entry;
3206
3207         list_for_each_entry(entry, &mem->attachments, list) {
3208                 if (entry->is_mapped && entry->adev == adev)
3209                         return true;
3210         }
3211         return false;
3212 }
3213
3214 #if defined(CONFIG_DEBUG_FS)
3215
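     /* debugfs: report KFD system and TTM memory usage against the limits */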
3216 int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
3217 {
3218
3219         spin_lock(&kfd_mem_limit.mem_limit_lock);
3220         seq_printf(m, "System mem used %lldM out of %lluM\n",
3221                   (kfd_mem_limit.system_mem_used >> 20),
3222                   (kfd_mem_limit.max_system_mem_limit >> 20));
3223         seq_printf(m, "TTM mem used %lldM out of %lluM\n",
3224                   (kfd_mem_limit.ttm_mem_used >> 20),
3225                   (kfd_mem_limit.max_ttm_mem_limit >> 20));
3226         spin_unlock(&kfd_mem_limit.mem_limit_lock);
3227
3228         return 0;
3229 }
3230
3231 #endif