/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;
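/* Show callback for the per-process procfs entries. Each process gets a
 * kobject named after its lead thread's PID inside the "proc" directory of
 * the kfd device's sysfs node; currently only a "pasid" attribute is
 * exposed there.
 */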
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	int val = 0;

	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);
		val = p->pasid;
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return snprintf(buffer, PAGE_SIZE, "%d\n", val);
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};
void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}

void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}
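/* Allocate the module-wide workqueues: kfd_process_wq runs the deferred
 * process teardown work (kfd_process_wq_release) and kfd_restore_wq runs
 * the ordered restore work for evicted processes.
 */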
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}
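/* Unmap a buffer object from the process GPU VM and free it. Counterpart
 * of kfd_process_alloc_gpuvm() below.
 */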
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
		struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate and map GPU memory for the KFD process
 * This function should only be called right after the process is created
 * and while kfd_processes_mutex is still held, to avoid concurrency.
 * Because of that exclusiveness, we do not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	*kptr = NULL;
	return err;
}
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the process for
 * IB usage. The memory reserved is for KFD to submit IBs to AMDGPU from
 * kernel. If the memory is reserved successfully, ib_kaddr will have the
 * CPU/kernel address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
			 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 ALLOC_MEM_FLAGS_WRITABLE |
			 ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation so
	 * that two threads of the same process cannot create two kfd_process
	 * structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret) {
			process = ERR_PTR(ret);
			goto out;
		}

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			goto out;
		}

		process->attr_pasid.name = "pasid";
		process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&process->attr_pasid);
		ret = sysfs_create_file(process->kobj, &process->attr_pasid);
		if (ret)
			pr_warn("Creating pasid for pid %d failed",
					(int)process->lead_thread->pid);
	}
out:
	mutex_unlock(&kfd_processes_mutex);

	return process;
}
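/* Look up the kfd_process of the current task without creating one. Returns
 * ERR_PTR(-EINVAL) if the task has no mm, does not use the pthreads
 * threading model, or has no KFD process yet.
 */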
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}
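/* Free every buffer object still tracked in this device's allocation idr,
 * unmapping each BO from all GPU VMs of the process before freeing it.
 */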
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfree(pdd);
	}
}
/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	/* Remove the procfs files */
	if (p->kobj) {
		sysfs_remove_file(p->kobj, &p->attr_pasid);
		kobject_del(p->kobj);
		kobject_put(p->kobj);
		p->kobj = NULL;
	}

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a pdd is in
	 * debug mode, force unregistration first so that the queues can be
	 * destroyed.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.free_notifier = kfd_process_free_notifier,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
			<< PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}
/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	INIT_LIST_HEAD(&process->per_device_data);
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Must be last, have to use release destruction after this */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_register_notifier;

	get_task_struct(process->lead_thread);
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	return process;

err_register_notifier:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
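/* Build the per-process bitmap of doorbells that user queues must not use.
 * On SOC15 ASICs a range of doorbells is reserved for non-CP engines
 * (SDMA, IH, VCN), as reported in the device's shared_resources.
 */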
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;
	int range_start = dev->shared_resources.non_cp_doorbells_start;
	int range_end = dev->shared_resources.non_cp_doorbells_end;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
	pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
			range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
			range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= range_start && i <= range_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
		}
	}

	return 0;
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}
/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device. Unbinding occurs when the process dies or the device is
 * removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}
/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work items
	 * is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos unreserves
	 * the KFD BOs, the process can be evicted again, but the restore still
	 * has a few steps left to finish. So wait for any previous restore
	 * work to complete first.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid 0x%x\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid 0x%x\n", p->pasid);

	/* Set last_restore_timestamp before the restoration actually succeeds.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before the KFD BOs are unreserved; if not, the process could be
	 * evicted again before the timestamp is set.
	 * If the restore fails, the timestamp is simply set again on the next
	 * attempt. This means the minimum GPU quantum would be
	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
	 * following two functions.
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process 0x%x\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}
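/* Queue restore work for every known process, e.g. after the devices resume.
 * Returns -EFAULT if the restore work of any process could not be queued.
 */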
int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
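/* Flush GPU TLBs for this process-device. Without the HW scheduler the flush
 * targets the VMID assigned to the process on this device; with the HW
 * scheduler it targets the process PASID.
 */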
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	} else {
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	}
}
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID 0x%x:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif