/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);
/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
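
/* Allocate the module-wide work queues on demand: a regular queue for
 * deferred process teardown and the ordered restore queue described
 * above. Returns 0 on success; on failure any queue that was created
 * is destroyed again and -ENOMEM is returned.
 */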
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}
void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
		struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 * This function should only be called right after the process
 * is created and when kfd_processes_mutex is still being held
 * to avoid concurrency. Because of that exclusiveness, we do
 * not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						 pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	*kptr = NULL;
	return err;
}
/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 * process for IB usage. The memory reserved is for KFD to submit
 * IBs to AMDGPU from kernel. If the memory is reserved
 * successfully, ib_kaddr will have the CPU/kernel
 * address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
			 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 ALLOC_MEM_FLAGS_WRITABLE |
			 ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}
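
/* kfd_create_process - Find or create the kfd_process for the calling
 * task. Called when /dev/kfd is opened; unlike kfd_get_process below,
 * it may create a new kfd_process, serialized by kfd_processes_mutex.
 */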
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread, filep);

	mutex_unlock(&kfd_processes_mutex);

	return process;
}
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}
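
/* Look up the kfd_process belonging to an mm_struct. Callers must hold
 * the kfd_processes_srcu read lock (see find_process below).
 */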
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file)
			fput(pdd->drm_file);
		else if (pdd->vm)
			pdd->dev->kfd2kgd->destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfree(pdd);
	}
}
/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}
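
/* The last reference is gone: defer the heavyweight teardown in
 * kfd_process_wq_release to a workqueue. The final kref_put can come
 * from contexts such as the SRCU callback in kfd_process_destroy_delayed,
 * where running the teardown directly would not be safe.
 */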
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read locked
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures and if the
	 * pdd is in debug mode, we should first force unregistration,
	 * then we will be able to destroy the queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
			<< PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();

	err = kfd_process_init_cwsr_apu(process, filep);
	if (err)
		goto err_init_cwsr;

	return process;

err_init_cwsr:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
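
/* On SOC15 ASICs some doorbells are reserved for kernel/KGD use. Mark
 * them in the per-process doorbell bitmap so that queue allocation
 * skips them.
 */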
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
				struct kfd_dev *dev)
{
	unsigned int i;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out any reserved doorbells */
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
		if ((dev->shared_resources.reserved_doorbell_mask & i) ==
		    dev->shared_resources.reserved_doorbell_val) {
			set_bit(i, qpd->doorbell_bitmap);
			pr_debug("reserved doorbell 0x%03x\n", i);
		}

	return 0;
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);

	return pdd;
}
/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = dev->kfd2kgd->acquire_process_vm(
			dev->kgd, drm_file,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = dev->kfd2kgd->create_process_vm(
			dev->kgd, &pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}
/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}
/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}
/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again. But
	 * restore has a few more steps to finish. So let's wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid %d\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid %d\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);

	/* Call restore_process_bos on the first KGD device. This function
	 * takes care of restoring the whole process including other devices.
	 * Restore can fail if enough memory is not available. If so,
	 * reschedule again.
	 */
	pdd = list_first_entry(&p->per_device_data,
			       struct kfd_process_device,
			       per_device_list);

	pr_debug("Started restoring pasid %d\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quanta would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid %d\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}
void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process %d\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}
int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
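
/* Flush GPU TLBs for this process-device. With hardware scheduling
 * (HWS) the flush is requested by PASID; without HWS it uses the VMID
 * assigned to the process on this device.
 */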
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	} else {
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	}
}
#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif