/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "kfd2kgd: " fmt

#include <linux/dma-buf.h>
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)

/* Userptr restore delay, just long enough to allow consecutive VM
 * changes to accumulate
 */
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/* Impose limit on how much memory KFD can use */
static struct {
	uint64_t max_system_mem_limit;
	uint64_t max_ttm_mem_limit;
	int64_t system_mem_used;
	int64_t ttm_mem_used;
	spinlock_t mem_limit_lock;
} kfd_mem_limit;

/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
	uint32_t        domain;
	bool            wait;
};

static const char * const domain_bit_to_string[] = {
		"CPU",
		"GTT",
		"VRAM",
		"GDS",
		"GWS",
		"OA"
};

#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]

static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
		struct kgd_mem *mem)
{
	struct kfd_bo_va_list *entry;

	list_for_each_entry(entry, &mem->bo_va_list, bo_list)
		if (entry->bo_va->base.vm == avm)
			return false;

	return true;
}

/* Set memory usage limits. Currently, the limits are:
 *  System (TTM + userptr) memory - 3/4th System RAM
 *  TTM memory - 3/8th System RAM
 */
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
	struct sysinfo si;
	uint64_t mem;

	si_meminfo(&si);
	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;

	spin_lock_init(&kfd_mem_limit.mem_limit_lock);
	kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
	kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
	pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
		(kfd_mem_limit.max_system_mem_limit >> 20),
		(kfd_mem_limit.max_ttm_mem_limit >> 20));
}

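/* Worked example for the limits above (illustrative, not part of the
 * driver): with 16 GiB of usable system memory, (mem >> 1) + (mem >> 2)
 * gives a 12 GiB system limit (3/4 of RAM) and (mem >> 1) - (mem >> 3)
 * gives a 6 GiB TTM limit (3/8 of RAM).
 */
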
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
	int ret = 0;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	vram_needed = 0;
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		/* TTM GTT memory */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		/* Userptr */
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else {
		/* VRAM and SG */
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			vram_needed = size;
	}

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if ((kfd_mem_limit.system_mem_used + system_mem_needed >
	     kfd_mem_limit.max_system_mem_limit) ||
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
	} else {
		kfd_mem_limit.system_mem_used += system_mem_needed;
		kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
		adev->kfd.vram_used += vram_needed;
	}

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
	return ret;
}

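/* Accounting summary for amdgpu_amdkfd_reserve_mem_limit() above
 * (derived from its branches; added here for clarity):
 *
 *   Domain                system_mem      ttm_mem         vram
 *   GTT                   acc_size+size   acc_size+size   0
 *   CPU, !sg (userptr)    acc_size+size   acc_size        0
 *   VRAM                  acc_size        acc_size        size
 *   CPU, sg (doorbell)    acc_size        acc_size        0
 */
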
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 domain, bool sg)
{
	size_t acc_size;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	spin_lock(&kfd_mem_limit.mem_limit_lock);
	if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
	} else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
			adev->kfd.vram_used -= size;
			WARN_ONCE(adev->kfd.vram_used < 0,
				  "kfd VRAM memory accounting unbalanced");
		}
	}
	WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
		  "kfd system memory accounting unbalanced");
	WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
		  "kfd TTM memory accounting unbalanced");

	spin_unlock(&kfd_mem_limit.mem_limit_lock);
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	u32 domain = bo->preferred_domains;
	bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);

	if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
		domain = AMDGPU_GEM_DOMAIN_CPU;
		sg = false;
	}

	unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}

/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
 *  reservation object.
 *
 * @bo: [IN] Remove eviction fence(s) from this BO
 * @ef: [IN] This eviction fence is removed if it
 *  is present in the shared list.
 *
 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
 */
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
					struct amdgpu_amdkfd_fence *ef)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	struct dma_resv_list *old, *new;
	unsigned int i, j, k;

	if (!ef)
		return -EINVAL;

	old = dma_resv_get_list(resv);
	if (!old)
		return 0;

	new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
		      GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Go through all the shared fences in the reservation object and sort
	 * the interesting ones to the end of the list.
	 */
	for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(old->shared[i],
					      dma_resv_held(resv));

		if (f->context == ef->base.context)
			RCU_INIT_POINTER(new->shared[--j], f);
		else
			RCU_INIT_POINTER(new->shared[k++], f);
	}
	new->shared_max = old->shared_max;
	new->shared_count = k;

	/* Install the new fence list, seqcount provides the barriers */
	preempt_disable();
	write_seqcount_begin(&resv->seq);
	RCU_INIT_POINTER(resv->fence, new);
	write_seqcount_end(&resv->seq);
	preempt_enable();

	/* Drop the references to the removed fences or move them to ef_list */
	for (i = j, k = 0; i < old->shared_count; ++i) {
		struct dma_fence *f;

		f = rcu_dereference_protected(new->shared[i],
					      dma_resv_held(resv));
		dma_fence_put(f);
	}
	kfree_rcu(old, rcu);

	return 0;
}

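/* Example of the partitioning done above (illustrative): starting from
 * shared fences [A, E1, B, E2] where E1 and E2 match ef's context, the
 * first loop builds new->shared = [A, B, E2, E1] with shared_count = 2,
 * so readers only see A and B; the second loop then drops the
 * references to E1 and E2.
 */
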
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
				     bool wait)
{
	struct ttm_operation_ctx ctx = { false, false };
	int ret;

	if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
		 "Called with userptr BO"))
		return -EINVAL;

	amdgpu_bo_placement_from_domain(bo, domain);

	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		goto validate_fail;
	if (wait)
		amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);

validate_fail:
	return ret;
}

static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_vm_parser *p = param;

	return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
}

/* vm_validate_pt_pd_bos - Validate page table and directory BOs
 *
 * Page directories are not updated here because huge page handling
 * during page table updates can invalidate page directory entries
 * again. Page directories are only updated after updating page
 * tables.
 */
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	struct amdgpu_vm_parser param;
	int ret;

	param.domain = AMDGPU_GEM_DOMAIN_VRAM;
	param.wait = false;

	ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
					&param);
	if (ret) {
		pr_err("amdgpu: failed to validate PT BOs\n");
		return ret;
	}

	ret = amdgpu_amdkfd_validate(&param, pd);
	if (ret) {
		pr_err("amdgpu: failed to validate PD\n");
		return ret;
	}

	vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

	if (vm->use_cpu_for_update) {
		ret = amdgpu_bo_kmap(pd, NULL);
		if (ret) {
			pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo *pd = vm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
	int ret;

	ret = amdgpu_vm_update_pdes(adev, vm, false);
	if (ret)
		return ret;

	return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}

static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
	bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
	uint32_t mapping_flags;

	mapping_flags = AMDGPU_VM_PAGE_READABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
		mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
	if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	return amdgpu_gem_va_map_flags(adev, mapping_flags);
}

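/* Example (illustrative): on Arcturus, a coherent VRAM BO local to the
 * mapping GPU (bo_adev == adev) maps with AMDGPU_VM_MTYPE_CC and a
 * remote VRAM BO with AMDGPU_VM_MTYPE_UC; on other ASICs the choice is
 * simply UC (coherent) vs. NC (non-coherent).
 */
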
/* add_bo_to_vm - Add a BO to a VM
 *
 * Everything that needs to be done only once when a BO is first added
 * to a VM. It can later be mapped and unmapped many times without
 * repeating these steps.
 *
 * 1. Allocate and initialize BO VA entry data structure
 * 2. Add BO to the VM
 * 3. Determine ASIC-specific PTE flags
 * 4. Alloc page tables and directories if needed
 * 4a. Validate new page tables and directories
 */
static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
		struct amdgpu_vm *vm, bool is_aql,
		struct kfd_bo_va_list **p_bo_va_entry)
{
	int ret;
	struct kfd_bo_va_list *bo_va_entry;
	struct amdgpu_bo *bo = mem->bo;
	uint64_t va = mem->va;
	struct list_head *list_bo_va = &mem->bo_va_list;
	unsigned long bo_size = bo->tbo.mem.size;

	if (!va) {
		pr_err("Invalid VA when adding BO to VM\n");
		return -EINVAL;
	}

	if (is_aql)
		va += bo_size;

	bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL);
	if (!bo_va_entry)
		return -ENOMEM;

	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
			va + bo_size, vm);

	/* Add BO to VM internal data structures */
	bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va_entry->bo_va) {
		ret = -EINVAL;
		pr_err("Failed to add BO object to VM. ret == %d\n",
				ret);
		goto err_vmadd;
	}

	bo_va_entry->va = va;
	bo_va_entry->pte_flags = get_pte_flags(adev, mem);
	bo_va_entry->kgd_dev = (void *)adev;
	list_add(&bo_va_entry->bo_list, list_bo_va);

	if (p_bo_va_entry)
		*p_bo_va_entry = bo_va_entry;

	/* Allocate validate page tables if needed */
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto err_alloc_pts;
	}

	return 0;

err_alloc_pts:
	amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va);
	list_del(&bo_va_entry->bo_list);
err_vmadd:
	kfree(bo_va_entry);
	return ret;
}

static void remove_bo_from_vm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, unsigned long size)
{
	pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n",
			entry->va,
			entry->va + size, entry);
	amdgpu_vm_bo_rmv(adev, entry->bo_va);
	list_del(&entry->bo_list);
	kfree(entry);
}

static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
				struct amdkfd_process_info *process_info,
				bool userptr)
{
	struct ttm_validate_buffer *entry = &mem->validate_list;
	struct amdgpu_bo *bo = mem->bo;

	INIT_LIST_HEAD(&entry->head);
	entry->num_shared = 1;
	entry->bo = &bo->tbo;
	mutex_lock(&process_info->lock);
	if (userptr)
		list_add_tail(&entry->head, &process_info->userptr_valid_list);
	else
		list_add_tail(&entry->head, &process_info->kfd_bo_list);
	mutex_unlock(&process_info->lock);
}

static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
		struct amdkfd_process_info *process_info)
{
	struct ttm_validate_buffer *bo_list_entry;

	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);
}

/* Initializes user pages. It registers the MMU notifier and validates
 * the userptr BO in the GTT domain.
 *
 * The BO must already be on the userptr_valid_list. Otherwise an
 * eviction and restore may happen that leaves the new BO unmapped
 * with the user mode queues running.
 *
 * Takes the process_info->lock to protect against concurrent restore
 * workers.
 *
 * Returns 0 for success, negative errno for errors.
 */
static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
			   uint64_t user_addr)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	struct amdgpu_bo *bo = mem->bo;
	struct ttm_operation_ctx ctx = { true, false };
	int ret = 0;

	mutex_lock(&process_info->lock);

	ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0);
	if (ret) {
		pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
		goto out;
	}

	ret = amdgpu_mn_register(bo, user_addr);
	if (ret) {
		pr_err("%s: Failed to register MMU notifier: %d\n",
		       __func__, ret);
		goto out;
	}

	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
	if (ret) {
		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
		goto unregister_out;
	}

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("%s: Failed to reserve BO\n", __func__);
		goto release_out;
	}
	amdgpu_bo_placement_from_domain(bo, mem->domain);
	ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (ret)
		pr_err("%s: failed to validate BO\n", __func__);
	amdgpu_bo_unreserve(bo);

release_out:
	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
unregister_out:
	if (ret)
		amdgpu_mn_unregister(bo);
out:
	mutex_unlock(&process_info->lock);
	return ret;
}

/* Reserving a BO and its page table BOs must happen atomically to
 * avoid deadlocks. Some operations update multiple VMs at once. Track
 * all the reservation info in a context structure. Optionally a sync
 * object can track VM updates.
 */
struct bo_vm_reservation_context {
	struct amdgpu_bo_list_entry kfd_bo; /* BO list entry for the KFD BO */
	unsigned int n_vms;		    /* Number of VMs reserved	    */
	struct amdgpu_bo_list_entry *vm_pd; /* Array of VM BO list entries  */
	struct ww_acquire_ctx ticket;	    /* Reservation ticket	    */
	struct list_head list, duplicates;  /* BO lists			    */
	struct amdgpu_sync *sync;	    /* Pointer to sync object	    */
	bool reserved;			    /* Whether BOs are reserved	    */
};

enum bo_vm_match {
	BO_VM_NOT_MAPPED = 0,	/* Match VMs where a BO is not mapped */
	BO_VM_MAPPED,		/* Match VMs where a BO is mapped     */
	BO_VM_ALL,		/* Match all VMs a BO was added to    */
};

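/* Typical usage of the context (a sketch; see the map/unmap functions
 * further down for the real call sites):
 *
 *	struct bo_vm_reservation_context ctx;
 *	int ret;
 *
 *	ret = reserve_bo_and_vm(mem, vm, &ctx);
 *	if (ret)
 *		return ret;
 *	... operate on the reserved BO and page table BOs ...
 *	unreserve_bo_and_vms(&ctx, false, false);
 */
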
/**
 * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
 * @mem: KFD BO structure.
 * @vm: the VM to reserve.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 */
static int reserve_bo_and_vm(struct kgd_mem *mem,
			      struct amdgpu_vm *vm,
			      struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	int ret;

	WARN_ON(!vm);

	ctx->reserved = false;
	ctx->n_vms = 1;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd), GFP_KERNEL);
	if (!ctx->vm_pd)
		return -ENOMEM;

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
 * @mem: KFD BO structure.
 * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
 * are used. Otherwise, a single VM associated with the BO.
 * @map_type: the mapping status that will be used to filter the VMs.
 * @ctx: the struct that will be used in unreserve_bo_and_vms().
 *
 * Returns 0 for success, negative for failure.
 */
static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
				   struct amdgpu_vm *vm, enum bo_vm_match map_type,
				   struct bo_vm_reservation_context *ctx)
{
	struct amdgpu_bo *bo = mem->bo;
	struct kfd_bo_va_list *entry;
	unsigned int i;
	int ret;

	ctx->reserved = false;
	ctx->n_vms = 0;
	ctx->vm_pd = NULL;
	ctx->sync = &mem->sync;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->duplicates);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		ctx->n_vms++;
	}

	if (ctx->n_vms != 0) {
		ctx->vm_pd = kcalloc(ctx->n_vms, sizeof(*ctx->vm_pd),
				     GFP_KERNEL);
		if (!ctx->vm_pd)
			return -ENOMEM;
	}

	ctx->kfd_bo.priority = 0;
	ctx->kfd_bo.tv.bo = &bo->tbo;
	ctx->kfd_bo.tv.num_shared = 1;
	list_add(&ctx->kfd_bo.tv.head, &ctx->list);

	i = 0;
	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if ((vm && vm != entry->bo_va->base.vm) ||
			(entry->is_mapped != map_type
			&& map_type != BO_VM_ALL))
			continue;

		amdgpu_vm_get_pd_bo(entry->bo_va->base.vm, &ctx->list,
				&ctx->vm_pd[i]);
		i++;
	}

	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
				     false, &ctx->duplicates);
	if (!ret)
		ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}

	return ret;
}

/**
 * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
 * @ctx: Reservation context to unreserve
 * @wait: Optionally wait for a sync object representing pending VM updates
 * @intr: Whether the wait is interruptible
 *
 * Also frees any resources allocated in
 * reserve_bo_and_(cond_)vm(s). Returns the status from
 * amdgpu_sync_wait.
 */
static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
				 bool wait, bool intr)
{
	int ret = 0;

	if (wait)
		ret = amdgpu_sync_wait(ctx->sync, intr);

	if (ctx->reserved)
		ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
	kfree(ctx->vm_pd);

	ctx->sync = NULL;

	ctx->reserved = false;
	ctx->vm_pd = NULL;

	return ret;
}

static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
				struct kfd_bo_va_list *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);

	amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);

	return 0;
}

static int update_gpuvm_pte(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry,
		struct amdgpu_sync *sync)
{
	int ret;
	struct amdgpu_bo_va *bo_va = entry->bo_va;

	/* Update the page tables */
	ret = amdgpu_vm_bo_update(adev, bo_va, false);
	if (ret) {
		pr_err("amdgpu_vm_bo_update failed\n");
		return ret;
	}

	return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}

static int map_bo_to_gpuvm(struct amdgpu_device *adev,
		struct kfd_bo_va_list *entry, struct amdgpu_sync *sync,
		bool no_update_pte)
{
	int ret;

	/* Set virtual address for the allocation */
	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
			       amdgpu_bo_size(entry->bo_va->base.bo),
			       entry->pte_flags);
	if (ret) {
		pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
				entry->va, ret);
		return ret;
	}

	if (no_update_pte)
		return 0;

	ret = update_gpuvm_pte(adev, entry, sync);
	if (ret) {
		pr_err("update_gpuvm_pte() failed\n");
		goto update_gpuvm_pte_failed;
	}

	return 0;

update_gpuvm_pte_failed:
	unmap_bo_from_gpuvm(adev, entry, sync);
	return ret;
}

static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
{
	struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);

	if (!sg)
		return NULL;
	if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
		kfree(sg);
		return NULL;
	}
	sg->sgl->dma_address = addr;
	sg->sgl->length = size;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
	sg->sgl->dma_length = size;
#endif
	return sg;
}

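/* Usage sketch (illustrative): amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
 * below wraps a doorbell or MMIO page at bus address *offset into a
 * one-entry table for a ttm_bo_type_sg BO:
 *
 *	sg = create_doorbell_sg(*offset, size);
 *
 * The entry carries only a DMA address and length; there is no backing
 * struct page, which is why these BOs are created as SG BOs.
 */
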
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_validate_pt_pd_bos(peer_vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
				 struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *pd = peer_vm->root.base.bo;

		ret = amdgpu_sync_resv(NULL,
					sync, pd->tbo.base.resv,
					AMDGPU_FENCE_OWNER_KFD, false);
		if (ret)
			return ret;
	}

	return 0;
}

static int process_update_pds(struct amdkfd_process_info *process_info,
			      struct amdgpu_sync *sync)
{
	struct amdgpu_vm *peer_vm;
	int ret;

	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		ret = vm_update_pds(peer_vm, sync);
		if (ret)
			return ret;
	}

	return 0;
}

static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
		       struct dma_fence **ef)
{
	struct amdkfd_process_info *info = NULL;
	int ret;

	if (!*process_info) {
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		mutex_init(&info->lock);
		INIT_LIST_HEAD(&info->vm_list_head);
		INIT_LIST_HEAD(&info->kfd_bo_list);
		INIT_LIST_HEAD(&info->userptr_valid_list);
		INIT_LIST_HEAD(&info->userptr_inval_list);

		info->eviction_fence =
			amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
						   current->mm);
		if (!info->eviction_fence) {
			pr_err("Failed to create eviction fence\n");
			ret = -ENOMEM;
			goto create_evict_fence_fail;
		}

		info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
		atomic_set(&info->evicted_bos, 0);
		INIT_DELAYED_WORK(&info->restore_userptr_work,
				  amdgpu_amdkfd_restore_userptr_worker);

		*process_info = info;
		*ef = dma_fence_get(&info->eviction_fence->base);
	}

	vm->process_info = *process_info;

	/* Validate page directory and attach eviction fence */
	ret = amdgpu_bo_reserve(vm->root.base.bo, true);
	if (ret)
		goto reserve_pd_fail;
	ret = vm_validate_pt_pd_bos(vm);
	if (ret) {
		pr_err("validate_pt_pd_bos() failed\n");
		goto validate_pd_fail;
	}
	ret = amdgpu_bo_sync_wait(vm->root.base.bo,
				  AMDGPU_FENCE_OWNER_KFD, false);
	if (ret)
		goto wait_pd_fail;
	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(vm->root.base.bo,
			&vm->process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(vm->root.base.bo);

	/* Update process info */
	mutex_lock(&vm->process_info->lock);
	list_add_tail(&vm->vm_list_node,
			&(vm->process_info->vm_list_head));
	vm->process_info->n_vms++;
	mutex_unlock(&vm->process_info->lock);

	return 0;

reserve_shared_fail:
wait_pd_fail:
validate_pd_fail:
	amdgpu_bo_unreserve(vm->root.base.bo);
reserve_pd_fail:
	vm->process_info = NULL;
	if (info) {
		/* Two fence references: one in info and one in *ef */
		dma_fence_put(&info->eviction_fence->base);
		dma_fence_put(*ef);
		*ef = NULL;
		*process_info = NULL;
		put_pid(info->pid);
create_evict_fence_fail:
		mutex_destroy(&info->lock);
		kfree(info);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					  void **vm, void **process_info,
					  struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *new_vm;
	int ret;

	new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
	if (!new_vm)
		return -ENOMEM;

	/* Initialize AMDGPU part of the VM */
	ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
	if (ret) {
		pr_err("Failed init vm ret %d\n", ret);
		goto amdgpu_vm_init_fail;
	}

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(new_vm, process_info, ef);
	if (ret)
		goto init_kfd_vm_fail;

	*vm = (void *) new_vm;

	return 0;

init_kfd_vm_fail:
	amdgpu_vm_fini(adev, new_vm);
amdgpu_vm_init_fail:
	kfree(new_vm);
	return ret;
}

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					   struct file *filp, unsigned int pasid,
					   void **vm, void **process_info,
					   struct dma_fence **ef)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct drm_file *drm_priv = filp->private_data;
	struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv;
	struct amdgpu_vm *avm = &drv_priv->vm;
	int ret;

	/* Already a compute VM? */
	if (avm->process_info)
		return -EINVAL;

	/* Convert VM into a compute VM */
	ret = amdgpu_vm_make_compute(adev, avm, pasid);
	if (ret)
		return ret;

	/* Initialize KFD part of the VM and process info */
	ret = init_kfd_vm(avm, process_info, ef);
	if (ret)
		return ret;

	*vm = (void *)avm;

	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdkfd_process_info *process_info = vm->process_info;
	struct amdgpu_bo *pd = vm->root.base.bo;

	if (!process_info)
		return;

	/* Release eviction fence from PD */
	amdgpu_bo_reserve(pd, false);
	amdgpu_bo_fence(pd, NULL, false);
	amdgpu_bo_unreserve(pd);

	/* Update process info */
	mutex_lock(&process_info->lock);
	process_info->n_vms--;
	list_del(&vm->vm_list_node);
	mutex_unlock(&process_info->lock);

	/* Release per-process resources when last compute VM is destroyed */
	if (!process_info->n_vms) {
		WARN_ON(!list_empty(&process_info->kfd_bo_list));
		WARN_ON(!list_empty(&process_info->userptr_valid_list));
		WARN_ON(!list_empty(&process_info->userptr_inval_list));

		dma_fence_put(&process_info->eviction_fence->base);
		cancel_delayed_work_sync(&process_info->restore_userptr_work);
		put_pid(process_info->pid);
		mutex_destroy(&process_info->lock);
		kfree(process_info);
	}
}

void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Destroying process vm %p\n", vm);

	/* Release the VM context */
	amdgpu_vm_fini(adev, avm);
	kfree(vm);
}

void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (WARN_ON(!kgd || !vm))
		return;

	pr_debug("Releasing process vm %p\n", vm);

	/* The original pasid of the amdgpu vm was already released
	 * when it was converted to a compute vm. The current pasid
	 * is managed by kfd and will be released on kfd process
	 * destroy. Set the amdgpu pasid to 0 to avoid a duplicate
	 * release.
	 */
	amdgpu_vm_release_compute(adev, avm);
}

uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
{
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	struct amdgpu_bo *pd = avm->root.base.bo;
	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

	if (adev->asic_type < CHIP_VEGA10)
		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
	return avm->pd_phys_addr;
}

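/* Example (illustrative): for ASICs older than Vega10 the caller gets
 * the page directory base in units of 4 KiB GPU pages (the
 * AMDGPU_GPU_PAGE_SHIFT shift above); Vega10 and later get the
 * amdgpu_gmc_pd_addr() value unchanged.
 */
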
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	enum ttm_bo_type bo_type = ttm_bo_type_device;
	struct sg_table *sg = NULL;
	uint64_t user_addr = 0;
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	u32 domain, alloc_domain;
	u64 alloc_flags;
	int ret;

	/*
	 * Check on which domain to allocate BO
	 */
	if (flags & ALLOC_MEM_FLAGS_VRAM) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
		alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
		alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
			AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	} else if (flags & ALLOC_MEM_FLAGS_GTT) {
		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_flags = 0;
	} else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		alloc_flags = 0;
		if (!offset || !*offset)
			return -EINVAL;
		user_addr = untagged_addr(*offset);
	} else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
			ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		domain = AMDGPU_GEM_DOMAIN_GTT;
		alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
		bo_type = ttm_bo_type_sg;
		alloc_flags = 0;
		if (size > UINT_MAX)
			return -EINVAL;
		sg = create_doorbell_sg(*offset, size);
		if (!sg)
			return -ENOMEM;
	} else {
		return -EINVAL;
	}

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem) {
		ret = -ENOMEM;
		goto err;
	}
	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);

	/* Workaround for AQL queue wraparound bug. Map the same
	 * memory twice. That means we only actually allocate half
	 * the memory.
	 */
	if ((*mem)->aql_queue)
		size = size >> 1;

	(*mem)->alloc_flags = flags;

	amdgpu_sync_create(&(*mem)->sync);

	ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
	if (ret) {
		pr_debug("Insufficient system memory\n");
		goto err_reserve_limit;
	}

	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
			va, size, domain_string(alloc_domain));

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = alloc_domain;
	bp.flags = alloc_flags;
	bp.type = bo_type;
	bp.resv = NULL;
	ret = amdgpu_bo_create(adev, &bp, &bo);
	if (ret) {
		pr_debug("Failed to create BO on domain %s. ret %d\n",
				domain_string(alloc_domain), ret);
		goto err_bo_create;
	}
	if (bo_type == ttm_bo_type_sg) {
		bo->tbo.sg = sg;
		bo->tbo.ttm->sg = sg;
	}
	bo->kfd_bo = *mem;
	(*mem)->bo = bo;
	if (user_addr)
		bo->flags |= AMDGPU_AMDKFD_USERPTR_BO;

	(*mem)->va = va;
	(*mem)->domain = domain;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);

	if (user_addr) {
		ret = init_user_pages(*mem, current->mm, user_addr);
		if (ret)
			goto allocate_init_user_pages_failed;
	}

	if (offset)
		*offset = amdgpu_bo_mmap_offset(bo);

	return 0;

allocate_init_user_pages_failed:
	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
	amdgpu_bo_unref(&bo);
	/* Don't unreserve system mem limit twice */
	goto err_reserve_limit;
err_bo_create:
	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
err:
	if (sg) {
		sg_free_table(sg);
		kfree(sg);
	}
	return ret;
}

int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry, *tmp;
	struct bo_vm_reservation_context ctx;
	struct ttm_validate_buffer *bo_list_entry;
	int ret;

	mutex_lock(&mem->lock);

	if (mem->mapped_to_gpu_memory > 0) {
		pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
				mem->va, bo_size);
		mutex_unlock(&mem->lock);
		return -EBUSY;
	}

	mutex_unlock(&mem->lock);
	/* lock is not needed after this, since mem is unused and will
	 * be freed anyway
	 */

	/* No more MMU notifiers */
	amdgpu_mn_unregister(mem->bo);

	/* Make sure restore workers don't access the BO any more */
	bo_list_entry = &mem->validate_list;
	mutex_lock(&process_info->lock);
	list_del(&bo_list_entry->head);
	mutex_unlock(&process_info->lock);

	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
	if (unlikely(ret))
		return ret;

	/* The eviction fence should be removed by the last unmap.
	 * TODO: Log an error condition if the bo still has the eviction fence
	 * attached
	 */
	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
					process_info->eviction_fence);
	pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
		mem->va + bo_size * (1 + mem->aql_queue));

	/* Remove from VM internal data structures */
	list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list)
		remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev,
				entry, bo_size);

	ret = unreserve_bo_and_vms(&ctx, false, false);

	/* Free the sync object */
	amdgpu_sync_free(&mem->sync);

	/* If the SG is not NULL, it's one we created for a doorbell or mmio
	 * remap BO. We need to free it.
	 */
	if (mem->bo->tbo.sg) {
		sg_free_table(mem->bo->tbo.sg);
		kfree(mem->bo->tbo.sg);
	}

	/* Free the BO */
	amdgpu_bo_unref(&mem->bo);
	mutex_destroy(&mem->lock);
	kfree(mem);

	return ret;
}

int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
	int ret;
	struct amdgpu_bo *bo;
	uint32_t domain;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	struct kfd_bo_va_list *bo_va_entry = NULL;
	struct kfd_bo_va_list *bo_va_entry_aql = NULL;
	unsigned long bo_size;
	bool is_invalid_userptr = false;

	bo = mem->bo;
	if (!bo) {
		pr_err("Invalid BO when mapping memory to GPU\n");
		return -EINVAL;
	}

	/* Make sure restore is not running concurrently. Since we
	 * don't map invalid userptr BOs, we rely on the next restore
	 * worker to do the mapping
	 */
	mutex_lock(&mem->process_info->lock);

	/* Lock mmap-sem. If we find an invalid userptr BO, we can be
	 * sure that the MMU notifier is no longer running
	 * concurrently and the queues are actually stopped
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		down_write(&current->mm->mmap_sem);
		is_invalid_userptr = atomic_read(&mem->invalid);
		up_write(&current->mm->mmap_sem);
	}

	mutex_lock(&mem->lock);

	domain = mem->domain;
	bo_size = bo->tbo.mem.size;

	pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
			mem->va,
			mem->va + bo_size * (1 + mem->aql_queue),
			vm, domain_string(domain));

	ret = reserve_bo_and_vm(mem, vm, &ctx);
	if (unlikely(ret))
		goto out;

	/* Userptr can be marked as "not invalid", but not actually be
	 * validated yet (still in the system domain). In that case
	 * the queues are still stopped and we can leave mapping for
	 * the next restore worker
	 */
	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
		is_invalid_userptr = true;

	if (check_if_add_bo_to_vm(avm, mem)) {
		ret = add_bo_to_vm(adev, mem, avm, false,
				&bo_va_entry);
		if (ret)
			goto add_bo_to_vm_failed;
		if (mem->aql_queue) {
			ret = add_bo_to_vm(adev, mem, avm,
					true, &bo_va_entry_aql);
			if (ret)
				goto add_bo_to_vm_failed_aql;
		}
	} else {
		ret = vm_validate_pt_pd_bos(avm);
		if (unlikely(ret))
			goto add_bo_to_vm_failed;
	}

	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		/* Validate BO only once. The eviction fence gets added to BO
		 * the first time it is mapped. Validate will wait for all
		 * background evictions to complete.
		 */
		ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
		if (ret) {
			pr_debug("Validate failed\n");
			goto map_bo_to_gpuvm_failed;
		}
	}

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && !entry->is_mapped) {
			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
					entry->va, entry->va + bo_size,
					entry);

			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
					      is_invalid_userptr);
			if (ret) {
				pr_err("Failed to map bo to gpuvm\n");
				goto map_bo_to_gpuvm_failed;
			}

			ret = vm_update_pds(vm, ctx.sync);
			if (ret) {
				pr_err("Failed to update page directories\n");
				goto map_bo_to_gpuvm_failed;
			}

			entry->is_mapped = true;
			mem->mapped_to_gpu_memory++;
			pr_debug("\t INC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
		amdgpu_bo_fence(bo,
				&avm->process_info->eviction_fence->base,
				true);
	ret = unreserve_bo_and_vms(&ctx, false, false);

	goto out;

map_bo_to_gpuvm_failed:
	if (bo_va_entry_aql)
		remove_bo_from_vm(adev, bo_va_entry_aql, bo_size);
add_bo_to_vm_failed_aql:
	if (bo_va_entry)
		remove_bo_from_vm(adev, bo_va_entry, bo_size);
add_bo_to_vm_failed:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->process_info->lock);
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdkfd_process_info *process_info =
		((struct amdgpu_vm *)vm)->process_info;
	unsigned long bo_size = mem->bo->tbo.mem.size;
	struct kfd_bo_va_list *entry;
	struct bo_vm_reservation_context ctx;
	int ret;

	mutex_lock(&mem->lock);

	ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx);
	if (unlikely(ret))
		goto out;
	/* If no VMs were reserved, it means the BO wasn't actually mapped */
	if (ctx.n_vms == 0) {
		ret = -EINVAL;
		goto unreserve_out;
	}

	ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm);
	if (unlikely(ret))
		goto unreserve_out;

	pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
		mem->va,
		mem->va + bo_size * (1 + mem->aql_queue),
		vm);

	list_for_each_entry(entry, &mem->bo_va_list, bo_list) {
		if (entry->bo_va->base.vm == vm && entry->is_mapped) {
			pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
					entry->va,
					entry->va + bo_size,
					entry);

			ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
			if (ret == 0) {
				entry->is_mapped = false;
			} else {
				pr_err("failed to unmap VA 0x%llx\n",
						mem->va);
				goto unreserve_out;
			}

			mem->mapped_to_gpu_memory--;
			pr_debug("\t DEC mapping count %d\n",
					mem->mapped_to_gpu_memory);
		}
	}

	/* If BO is unmapped from all VMs, unfence it. It can be evicted if
	 * required.
	 */
	if (mem->mapped_to_gpu_memory == 0 &&
	    !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
		amdgpu_amdkfd_remove_eviction_fence(mem->bo,
						process_info->eviction_fence);

unreserve_out:
	unreserve_bo_and_vms(&ctx, false, false);
out:
	mutex_unlock(&mem->lock);
	return ret;
}

int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr)
{
	struct amdgpu_sync sync;
	int ret;

	amdgpu_sync_create(&sync);

	mutex_lock(&mem->lock);
	amdgpu_sync_clone(&mem->sync, &sync);
	mutex_unlock(&mem->lock);

	ret = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);
	return ret;
}

int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size)
{
	int ret;
	struct amdgpu_bo *bo = mem->bo;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		pr_err("userptr can't be mapped to kernel\n");
		return -EINVAL;
	}

	/* delete kgd_mem from kfd_bo_list to avoid re-validating
	 * this BO in BO's restoring after eviction.
	 */
	mutex_lock(&mem->process_info->lock);

	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		pr_err("Failed to reserve bo. ret %d\n", ret);
		goto bo_reserve_failed;
	}

	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (ret) {
		pr_err("Failed to pin bo. ret %d\n", ret);
		goto pin_failed;
	}

	ret = amdgpu_bo_kmap(bo, kptr);
	if (ret) {
		pr_err("Failed to map bo to kernel. ret %d\n", ret);
		goto kmap_failed;
	}

	amdgpu_amdkfd_remove_eviction_fence(
		bo, mem->process_info->eviction_fence);
	list_del_init(&mem->validate_list.head);

	if (size)
		*size = amdgpu_bo_size(bo);

	amdgpu_bo_unreserve(bo);

	mutex_unlock(&mem->process_info->lock);
	return 0;

kmap_failed:
	amdgpu_bo_unpin(bo);
pin_failed:
	amdgpu_bo_unreserve(bo);
bo_reserve_failed:
	mutex_unlock(&mem->process_info->lock);

	return ret;
}

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					      struct kfd_vm_fault_info *mem)
{
	struct amdgpu_device *adev;

	adev = (struct amdgpu_device *)kgd;
	if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
		*mem = *adev->gmc.vm_fault_info;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 0);
	}
	return 0;
}

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dma_buf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		return -EINVAL;

	obj = dma_buf->priv;
	if (obj->dev->dev_private != adev)
		/* Can't handle buffers from other devices */
		return -EINVAL;

	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	if (size)
		*size = amdgpu_bo_size(bo);

	if (mmap_offset)
		*mmap_offset = amdgpu_bo_mmap_offset(bo);

	INIT_LIST_HEAD(&(*mem)->bo_va_list);
	mutex_init(&(*mem)->lock);
	(*mem)->alloc_flags =
		((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
		ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;

	(*mem)->bo = amdgpu_bo_ref(bo);
	(*mem)->va = va;
	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
	(*mem)->mapped_to_gpu_memory = 0;
	(*mem)->process_info = avm->process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	return 0;
}

/* Evict a userptr BO by stopping the queues if necessary
 *
 * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
 * cannot do any memory allocations, and cannot take any locks that
 * are held elsewhere while allocating memory. Therefore this is as
 * simple as possible, using atomic counters.
 *
 * It doesn't do anything to the BO itself. The real work happens in
 * restore, where we get updated page addresses. This function only
 * ensures that GPU access to the BO is stopped.
 */
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
				struct mm_struct *mm)
{
	struct amdkfd_process_info *process_info = mem->process_info;
	int invalid, evicted_bos;
	int r = 0;

	invalid = atomic_inc_return(&mem->invalid);
	evicted_bos = atomic_inc_return(&process_info->evicted_bos);
	if (evicted_bos == 1) {
		/* First eviction, stop the queues */
		r = kgd2kfd_quiesce_mm(mm);
		if (r)
			pr_err("Failed to quiesce KFD\n");
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}

	return r;
}

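/* Example of the counter protocol above (illustrative): the first
 * eviction bumps evicted_bos 0 -> 1 and quiesces the queues; later
 * evictions only increment the counter. The restore worker resets
 * evicted_bos to 0 with a cmpxchg before resuming the queues, and
 * reschedules itself if new evictions raced with the restore.
 */
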
/* Update invalid userptr BOs
 *
 * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
 * userptr_inval_list and updates user pages for all BOs that have
 * been invalidated since their last update.
 */
static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
				     struct mm_struct *mm)
{
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int invalid, ret;

	/* Move all invalidated BOs to the userptr_inval_list and
	 * release their user pages by migration to the CPU domain
	 */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_valid_list,
				 validate_list.head) {
		if (!atomic_read(&mem->invalid))
			continue; /* BO is still valid */

		bo = mem->bo;

		if (amdgpu_bo_reserve(bo, true))
			return -EAGAIN;
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (ret) {
			pr_err("%s: Failed to invalidate userptr BO\n",
			       __func__);
			return -EAGAIN;
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_inval_list);
	}

	if (list_empty(&process_info->userptr_inval_list))
		return 0; /* All evicted userptr BOs were freed */

	/* Go through userptr_inval_list and update any invalid user_pages */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		invalid = atomic_read(&mem->invalid);
		if (!invalid)
			/* BO hasn't been invalidated since the last
			 * revalidation attempt. Keep its BO list.
			 */
			continue;

		bo = mem->bo;

		/* Get updated user pages */
		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (ret) {
			pr_debug("%s: Failed to get user pages: %d\n",
				__func__, ret);

			/* Return error -EBUSY or -ENOMEM, retry restore */
			return ret;
		}

		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

		/* Mark the BO as valid unless it was invalidated
		 * again concurrently.
		 */
		if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
			return -EAGAIN;
	}

	return 0;
}

/* Validate invalid userptr BOs
 *
 * Validates BOs on the userptr_inval_list, and moves them back to the
 * userptr_valid_list. Also updates GPUVM page tables with new page
 * addresses and waits for the page table updates to complete.
 */
static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
{
	struct amdgpu_bo_list_entry *pd_bo_list_entries;
	struct list_head resv_list, duplicates;
	struct ww_acquire_ctx ticket;
	struct amdgpu_sync sync;

	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem, *tmp_mem;
	struct amdgpu_bo *bo;
	struct ttm_operation_ctx ctx = { false, false };
	int i, ret;

	pd_bo_list_entries = kcalloc(process_info->n_vms,
				     sizeof(struct amdgpu_bo_list_entry),
				     GFP_KERNEL);
	if (!pd_bo_list_entries) {
		pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
		ret = -ENOMEM;
		goto out_no_mem;
	}

	INIT_LIST_HEAD(&resv_list);
	INIT_LIST_HEAD(&duplicates);

	/* Get all the page directory BOs that need to be reserved */
	i = 0;
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &resv_list,
				    &pd_bo_list_entries[i++]);
	/* Add the userptr_inval_list entries to resv_list */
	list_for_each_entry(mem, &process_info->userptr_inval_list,
			    validate_list.head) {
		list_add_tail(&mem->resv_list.head, &resv_list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	/* Reserve all BOs and page tables for validation */
	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
	WARN(!list_empty(&duplicates), "Duplicates should be empty");
	if (ret)
		goto out_free;

	amdgpu_sync_create(&sync);

	ret = process_validate_vms(process_info);
	if (ret)
		goto unreserve_out;

	/* Validate BOs and update GPUVM page tables */
	list_for_each_entry_safe(mem, tmp_mem,
				 &process_info->userptr_inval_list,
				 validate_list.head) {
		struct kfd_bo_va_list *bo_va_entry;

		bo = mem->bo;

		/* Validate the BO if we got user pages */
		if (bo->tbo.ttm->pages[0]) {
			amdgpu_bo_placement_from_domain(bo, mem->domain);
			ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (ret) {
				pr_err("%s: failed to validate BO\n", __func__);
				goto unreserve_out;
			}
		}

		list_move_tail(&mem->validate_list.head,
			       &process_info->userptr_valid_list);

		/* Update mapping. If the BO was not validated
		 * (because we couldn't get user pages), this will
		 * clear the page table entries, which will result in
		 * VM faults if the GPU tries to access the invalid
		 * memory.
		 */
		list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) {
			if (!bo_va_entry->is_mapped)
				continue;

			ret = update_gpuvm_pte((struct amdgpu_device *)
					       bo_va_entry->kgd_dev,
					       bo_va_entry, &sync);
			if (ret) {
				pr_err("%s: update PTE failed\n", __func__);
				/* make sure this gets validated again */
				atomic_inc(&mem->invalid);
				goto unreserve_out;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync);

unreserve_out:
	ttm_eu_backoff_reservation(&ticket, &resv_list);
	amdgpu_sync_wait(&sync, false);
	amdgpu_sync_free(&sync);
out_free:
	kfree(pd_bo_list_entries);
out_no_mem:

	return ret;
}

/* Worker callback to restore evicted userptr BOs
 *
 * Tries to update and validate all userptr BOs. If successful and no
 * concurrent evictions happened, the queues are restarted. Otherwise,
 * reschedule for another attempt later.
 */
static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info =
		container_of(dwork, struct amdkfd_process_info,
			     restore_userptr_work);
	struct task_struct *usertask;
	struct mm_struct *mm;
	int evicted_bos;

	evicted_bos = atomic_read(&process_info->evicted_bos);
	if (!evicted_bos)
		return;

	/* Reference task and mm in case of concurrent process termination */
	usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
	if (!usertask)
		return;
	mm = get_task_mm(usertask);
	if (!mm) {
		put_task_struct(usertask);
		return;
	}

	mutex_lock(&process_info->lock);

	if (update_invalid_user_pages(process_info, mm))
		goto unlock_out;
	/* userptr_inval_list can be empty if all evicted userptr BOs
	 * have been freed. In that case there is nothing to validate
	 * and we can just restart the queues.
	 */
	if (!list_empty(&process_info->userptr_inval_list)) {
		if (atomic_read(&process_info->evicted_bos) != evicted_bos)
			goto unlock_out; /* Concurrent eviction, try again */

		if (validate_invalid_user_pages(process_info))
			goto unlock_out;
	}
	/* Final check for concurrent eviction and atomic update. If
	 * another eviction happens after successful update, it will
	 * be a first eviction that calls quiesce_mm. The eviction
	 * reference counting inside KFD will handle this case.
	 */
	if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) !=
	    evicted_bos)
		goto unlock_out;
	evicted_bos = 0;
	if (kgd2kfd_resume_mm(mm)) {
		pr_err("%s: Failed to resume KFD\n", __func__);
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
	}

unlock_out:
	mutex_unlock(&process_info->lock);
	mmput(mm);
	put_task_struct(usertask);

	/* If validation failed, reschedule another attempt */
	if (evicted_bos)
		schedule_delayed_work(&process_info->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
}

/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
 *   KFD process identified by process_info
 *
 * @process_info: amdkfd_process_info of the KFD process
 *
 * After memory eviction, restore thread calls this function. The function
 * should be called when the Process is still valid. BO restore involves:
 *
 * 1.  Release old eviction fence and create new one
 * 2.  Get two copies of PD BO list from all the VMs. Keep one copy as pd_list.
 * 3.  Use the second PD list and kfd_bo_list to create a list (ctx.list) of
 *     BOs that need to be reserved.
 * 4.  Reserve all the BOs
 * 5.  Validate PD and PT BOs.
 * 6.  Validate all KFD BOs using kfd_bo_list, map them and add new fence
 * 7.  Add fence to all PD and PT BOs.
 * 8.  Unreserve all BOs
 */
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
{
	struct amdgpu_bo_list_entry *pd_bo_list;
	struct amdkfd_process_info *process_info = info;
	struct amdgpu_vm *peer_vm;
	struct kgd_mem *mem;
	struct bo_vm_reservation_context ctx;
	struct amdgpu_amdkfd_fence *new_fence;
	int ret = 0, i;
	struct list_head duplicate_save;
	struct amdgpu_sync sync_obj;

	INIT_LIST_HEAD(&duplicate_save);
	INIT_LIST_HEAD(&ctx.list);
	INIT_LIST_HEAD(&ctx.duplicates);

	pd_bo_list = kcalloc(process_info->n_vms,
			     sizeof(struct amdgpu_bo_list_entry),
			     GFP_KERNEL);
	if (!pd_bo_list)
		return -ENOMEM;

	i = 0;
	mutex_lock(&process_info->lock);
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			vm_list_node)
		amdgpu_vm_get_pd_bo(peer_vm, &ctx.list, &pd_bo_list[i++]);

	/* Reserve all BOs and page tables/directory. Add all BOs from
	 * kfd_bo_list to ctx.list
	 */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		list_add_tail(&mem->resv_list.head, &ctx.list);
		mem->resv_list.bo = mem->validate_list.bo;
		mem->resv_list.num_shared = mem->validate_list.num_shared;
	}

	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
				     false, &duplicate_save);
	if (ret) {
		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
		goto ttm_reserve_fail;
	}

	amdgpu_sync_create(&sync_obj);

	/* Validate PDs and PTs */
	ret = process_validate_vms(process_info);
	if (ret)
		goto validate_map_fail;

	ret = process_sync_pds_resv(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
		goto validate_map_fail;
	}

	/* Validate BOs and map them to GPUVM (update VM page tables). */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
			    validate_list.head) {

		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_bo_va_list *bo_va_entry;

		ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
		if (ret) {
			pr_debug("Memory eviction: Validate BOs failed. Try again\n");
			goto validate_map_fail;
		}
		ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		}
		list_for_each_entry(bo_va_entry, &mem->bo_va_list,
				    bo_list) {
			ret = update_gpuvm_pte((struct amdgpu_device *)
					      bo_va_entry->kgd_dev,
					      bo_va_entry,
					      &sync_obj);
			if (ret) {
				pr_debug("Memory eviction: update PTE failed. Try again\n");
				goto validate_map_fail;
			}
		}
	}

	/* Update page directories */
	ret = process_update_pds(process_info, &sync_obj);
	if (ret) {
		pr_debug("Memory eviction: update PDs failed. Try again\n");
		goto validate_map_fail;
	}

	/* Wait for validate and PT updates to finish */
	amdgpu_sync_wait(&sync_obj, false);

	/* Release old eviction fence and create new one, because fence only
	 * goes from unsignaled to signaled, fence cannot be reused.
	 * Use context and mm from the old fence.
	 */
	new_fence = amdgpu_amdkfd_fence_create(
				process_info->eviction_fence->base.context,
				process_info->eviction_fence->mm);
	if (!new_fence) {
		pr_err("Failed to create eviction fence\n");
		ret = -ENOMEM;
		goto validate_map_fail;
	}
	dma_fence_put(&process_info->eviction_fence->base);
	process_info->eviction_fence = new_fence;
	*ef = dma_fence_get(&new_fence->base);

	/* Attach new eviction fence to all BOs */
	list_for_each_entry(mem, &process_info->kfd_bo_list,
		validate_list.head)
		amdgpu_bo_fence(mem->bo,
			&process_info->eviction_fence->base, true);

	/* Attach eviction fence to PD / PT BOs */
	list_for_each_entry(peer_vm, &process_info->vm_list_head,
			    vm_list_node) {
		struct amdgpu_bo *bo = peer_vm->root.base.bo;

		amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true);
	}

validate_map_fail:
	ttm_eu_backoff_reservation(&ctx.ticket, &ctx.list);
	amdgpu_sync_free(&sync_obj);
ttm_reserve_fail:
	mutex_unlock(&process_info->lock);
	kfree(pd_bo_list);
	return ret;
}

int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
{
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
	int ret;

	if (!info || !gws)
		return -EINVAL;

	*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if (!*mem)
		return -ENOMEM;

	mutex_init(&(*mem)->lock);
	(*mem)->bo = amdgpu_bo_ref(gws_bo);
	(*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
	(*mem)->process_info = process_info;
	add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
	amdgpu_sync_create(&(*mem)->sync);

	/* Validate gws bo the first time it is added to process */
	mutex_lock(&(*mem)->process_info->lock);
	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		goto bo_reservation_failure;
	}

	ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
	if (ret) {
		pr_err("GWS BO validate failed %d\n", ret);
		goto bo_validation_failure;
	}
	/* GWS resource is shared b/t amdgpu and amdkfd
	 * Add process eviction fence to bo so they can
	 * evict each other.
	 */
	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
	if (ret)
		goto reserve_shared_fail;
	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
	amdgpu_bo_unreserve(gws_bo);
	mutex_unlock(&(*mem)->process_info->lock);

	return ret;

reserve_shared_fail:
bo_validation_failure:
	amdgpu_bo_unreserve(gws_bo);
bo_reservation_failure:
	mutex_unlock(&(*mem)->process_info->lock);
	amdgpu_sync_free(&(*mem)->sync);
	remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&(*mem)->lock);
	kfree(*mem);
	*mem = NULL;
	return ret;
}

int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
{
	int ret;
	struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
	struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
	struct amdgpu_bo *gws_bo = kgd_mem->bo;

	/* Remove BO from process's validate list so restore worker won't touch
	 * it anymore
	 */
	remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);

	ret = amdgpu_bo_reserve(gws_bo, false);
	if (unlikely(ret)) {
		pr_err("Reserve gws bo failed %d\n", ret);
		//TODO add BO back to validate_list?
		return ret;
	}
	amdgpu_amdkfd_remove_eviction_fence(gws_bo,
			process_info->eviction_fence);
	amdgpu_bo_unreserve(gws_bo);
	amdgpu_sync_free(&kgd_mem->sync);
	amdgpu_bo_unref(&gws_bo);
	mutex_destroy(&kgd_mem->lock);
	kfree(kgd_mem);
	return 0;
}