/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);
/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
struct kfd_procfs_tree {
	struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;
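/* sysfs "show" callback for the per-process entries created under the
 * "proc" kobject. Only the "pasid" attribute is currently exposed.
 */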
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	int val = 0;

	if (strcmp(attr->name, "pasid") == 0) {
		struct kfd_process *p = container_of(attr, struct kfd_process,
						     attr_pasid);
		val = p->pasid;
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return snprintf(buffer, PAGE_SIZE, "%d\n", val);
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}
static const struct sysfs_ops kfd_procfs_ops = {
	.show = kfd_procfs_show,
};
static struct kobj_type procfs_type = {
	.release = kfd_procfs_kobj_release,
	.sysfs_ops = &kfd_procfs_ops,
};
void kfd_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = kfd_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &kfd_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		kfd_procfs_shutdown();
	}
}
void kfd_procfs_shutdown(void)
{
	if (procfs.kobj) {
		kobject_del(procfs.kobj);
		kobject_put(procfs.kobj);
		procfs.kobj = NULL;
	}
}
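/* Allocate the workqueues used for deferred process teardown
 * (kfd_process_wq) and for restoring evicted processes (kfd_restore_wq).
 */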
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}
void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
				   struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should be called only right after the process
 *	is created and while kfd_processes_mutex is still held, to
 *	avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	return err;
}
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from the kernel. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
			 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 ALLOC_MEM_FLAGS_WRITABLE |
			 ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}
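/* Create (or look up) the kfd_process for the calling task. Called when
 * /dev/kfd is opened; this also sets up the CWSR trap handler mapping and
 * the per-process procfs entries.
 */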
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;
	int ret;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process) {
		pr_debug("Process already found\n");
	} else {
		process = create_process(thread);
		if (IS_ERR(process))
			goto out;

		ret = kfd_process_init_cwsr_apu(process, filep);
		if (ret) {
			process = ERR_PTR(ret);
			goto out;
		}

		if (!procfs.kobj)
			goto out;

		process->kobj = kfd_alloc_struct(process->kobj);
		if (!process->kobj) {
			pr_warn("Creating procfs kobject failed");
			goto out;
		}
		ret = kobject_init_and_add(process->kobj, &procfs_type,
					   procfs.kobj, "%d",
					   (int)process->lead_thread->pid);
		if (ret) {
			pr_warn("Creating procfs pid directory failed");
			goto out;
		}

		process->attr_pasid.name = "pasid";
		process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
		sysfs_attr_init(&process->attr_pasid);
		ret = sysfs_create_file(process->kobj, &process->attr_pasid);
		if (ret)
			pr_warn("Creating pasid for pid %d failed",
				(int)process->lead_thread->pid);
	}
out:
	mutex_unlock(&kfd_processes_mutex);

	return process;
}
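/* Look up the kfd_process for the calling task without creating one.
 * Returns ERR_PTR(-EINVAL) if no process exists for this mm.
 */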
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}
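/* Look up a process by its mm_struct in kfd_processes_table. Callers
 * must hold the kfd_processes_srcu read lock.
 */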
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
				   kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}
static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from the idr and release the appropriate
	 * local memory object.
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}
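/* Tear down all per-device data of a process: release or destroy the
 * per-device GPU VM, free the CWSR pages allocated for APUs, and destroy
 * the allocation handle idr.
 */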
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file) {
			amdgpu_amdkfd_gpuvm_release_process_vm(
					pdd->dev->kgd, pdd->vm);
			fput(pdd->drm_file);
		} else if (pdd->vm)
			amdgpu_amdkfd_gpuvm_destroy_process_vm(
					pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfree(pdd);
	}
}
/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	/* Remove the procfs files */
	if (p->kobj) {
		sysfs_remove_file(p->kobj, &p->attr_pasid);
		kobject_del(p->kobj);
		kobject_put(p->kobj);
		p->kobj = NULL;
	}

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}
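/* kref release callback: queue the heavyweight teardown
 * (kfd_process_wq_release) on kfd_process_wq.
 */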
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}
static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
	kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					 struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a pdd is in
	 * debug mode, we must first force unregistration, and only then
	 * will we be able to destroy the queues.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
	.free_notifier = kfd_process_free_notifier,
};
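/* Set up the CWSR trap handler for APUs: map the reserved TBA/TMA pages
 * into the process address space through the /dev/kfd mmap offset and
 * copy the trap handler ISA into them.
 */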
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
			<< PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}
/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released.
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	kref_init(&process->ref);
	mutex_init(&process->mutex);
	process->mm = thread->mm;
	process->lead_thread = thread->group_leader;
	INIT_LIST_HEAD(&process->per_device_data);
	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();
	kfd_event_init_process(process);
	process->is_32bit_user_mode = in_compat_syscall();

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	/* Must be last, have to use release destruction after this */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_register_notifier;

	get_task_struct(process->lead_thread);
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	return process;

err_register_notifier:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	mutex_destroy(&process->mutex);
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
		if (i >= dev->shared_resources.non_cp_doorbells_start
			&& i <= dev->shared_resources.non_cp_doorbells_end) {
			set_bit(i, qpd->doorbell_bitmap);
			set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
				qpd->doorbell_bitmap);
			pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
				i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
		}
	}

	return 0;
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}
/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	amdgpu_vm_set_task_info(pdd->vm);

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device. Unbinding occurs when the process dies or the device
 * is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}
bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}
/* Create a handle mapped to mem in the process-local memory idr.
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}
/* Translate a handle from the process-local memory idr.
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}
/* Remove a handle from the process-local memory idr.
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back a partial eviction by
	 * restoring the queues that were evicted so far.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work items
	 * is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves the KFD BOs, it is possible to be evicted again, but
	 * restore still has a few more steps to finish. So let's wait for
	 * any previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid %d\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid %d\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid.
	 */
	p = container_of(dwork, struct kfd_process, restore_work);
	pr_debug("Started restoring pasid %d\n", p->pasid);

	/* Set last_restore_timestamp before attempting the restore.
	 * Otherwise it would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process could be
	 * evicted again before the timestamp is set.
	 * If the restore fails, the timestamp will be set again in the next
	 * attempt. This means the minimum GPU quantum would be
	 * PROCESS_ACTIVE_TIME_MS minus the time it takes to execute the
	 * following two functions.
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
						      &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid %d\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process %d\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}
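/* Schedule restore work for every known process; returns -EFAULT if the
 * work could not be queued for any of them.
 */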
int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
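/* mmap handler for the CWSR reserved memory on APUs: allocate zeroed
 * pages for the TBA/TMA and map them into the user VMA.
 */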
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
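/* Flush the GPU TLBs for this process-device: per-VMID when running with
 * KFD_SCHED_POLICY_NO_HWS, otherwise per-PASID.
 */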
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	} else {
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	}
}
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif