1 // SPDX-License-Identifier: GPL-2.0 OR MIT
3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
42 #include "kfd_device_queue_manager.h"
44 #include "kfd_smi_events.h"
45 #include "kfd_debug.h"
48 * List of struct kfd_process (field kfd_process).
49 * Unique/indexed by mm_struct*
51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
52 DEFINE_MUTEX(kfd_processes_mutex);
54 DEFINE_SRCU(kfd_processes_srcu);
56 /* For process termination handling */
57 static struct workqueue_struct *kfd_process_wq;
59 /* Ordered, single-threaded workqueue for restoring evicted
60 * processes. Restoring multiple processes concurrently under memory
61 * pressure can lead to processes blocking each other from validating
62 * their BOs and result in a live-lock situation where processes
63 * remain evicted indefinitely.
65 static struct workqueue_struct *kfd_restore_wq;
67 static struct kfd_process *find_process(const struct task_struct *thread,
69 static void kfd_process_ref_release(struct kref *ref);
70 static struct kfd_process *create_process(const struct task_struct *thread);
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
77 struct kfd_procfs_tree {
81 static struct kfd_procfs_tree procfs;
84 * Structure for SDMA activity tracking
86 struct kfd_sdma_activity_handler_workarea {
87 struct work_struct sdma_activity_work;
88 struct kfd_process_device *pdd;
89 uint64_t sdma_activity_counter;
92 struct temp_sdma_queue_list {
93 uint64_t __user *rptr;
95 unsigned int queue_id;
96 struct list_head list;
99 static void kfd_sdma_activity_worker(struct work_struct *work)
101 struct kfd_sdma_activity_handler_workarea *workarea;
102 struct kfd_process_device *pdd;
104 struct mm_struct *mm;
106 struct qcm_process_device *qpd;
107 struct device_queue_manager *dqm;
109 struct temp_sdma_queue_list sdma_q_list;
110 struct temp_sdma_queue_list *sdma_q, *next;
112 workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
123 * Total SDMA activity is current SDMA activity + past SDMA activity
124 * Past SDMA count is stored in pdd.
125 * To get the current activity counters for all active SDMA queues,
126 * we loop over all SDMA queues and get their counts from user-space.
128 * We cannot call get_user() with dqm_lock held as it can cause
129 * a circular lock dependency situation. To read the SDMA stats,
130 * we need to do the following:
132 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
133 * with dqm_lock/dqm_unlock().
134 * 2. Call get_user() for each node in temporary list without dqm_lock.
135 * Save the SDMA count for each node and also add the count to the total
136 * SDMA count counter.
137 * It's possible that, during this step, a few SDMA queue nodes were deleted
138 * from the qpd->queues_list.
139 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
140 * If any node got deleted, its SDMA count would be captured in the sdma
141 * past activity counter. So subtract the SDMA counter stored in step 2
142 * for this node from the total SDMA count.
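 *
 * Illustrative example of the above (all numbers made up): with
 * sdma_past_activity_counter = 1000 and two SDMA queues whose read
 * pointers report 200 and 300 in step 2, the running total becomes
 * 1000 + 200 + 300 = 1500. If the first queue is destroyed before
 * step 3, its final count is already folded into the past activity
 * counter, so its step-2 reading (200) is subtracted again and the
 * reported total is 1300.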
144 INIT_LIST_HEAD(&sdma_q_list.list);
147 * Create the temp list of all SDMA queues
151 list_for_each_entry(q, &qpd->queues_list, list) {
152 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
156 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
162 INIT_LIST_HEAD(&sdma_q->list);
163 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164 sdma_q->queue_id = q->properties.queue_id;
165 list_add_tail(&sdma_q->list, &sdma_q_list.list);
169 * If the temp list is empty, then no SDMA queue nodes were found in
170 * qpd->queues_list. Return the past activity count as the total sdma count.
173 if (list_empty(&sdma_q_list.list)) {
174 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
182 * Get the usage count for each SDMA queue in temp_list.
184 mm = get_task_mm(pdd->process->lead_thread);
190 list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
192 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
194 pr_debug("Failed to read SDMA queue active counter for queue id: %d",
197 sdma_q->sdma_val = val;
198 workarea->sdma_activity_counter += val;
202 kthread_unuse_mm(mm);
206 * Do a second iteration over qpd->queues_list to check if any SDMA
207 * nodes got deleted while fetching the SDMA counters.
211 workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
213 list_for_each_entry(q, &qpd->queues_list, list) {
214 if (list_empty(&sdma_q_list.list))
217 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218 (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
221 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222 if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223 (sdma_q->queue_id == q->properties.queue_id)) {
224 list_del(&sdma_q->list);
234 * If the temp list is not empty, it implies some queues got deleted
235 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
236 * count for each node from the total SDMA count.
238 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239 workarea->sdma_activity_counter -= sdma_q->sdma_val;
240 list_del(&sdma_q->list);
247 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248 list_del(&sdma_q->list);
254 * kfd_get_cu_occupancy - Collect number of waves in-flight on this device
255 * by the current process. Translates the acquired wave count into the number of compute units that are occupied.
258 * @attr: Handle of attribute that allows reporting of wave count. The attribute
259 * handle encapsulates the GPU device it is associated with, thereby allowing collection
260 * of waves in flight, etc.
261 * @buffer: Handle of user provided buffer updated with wave count
263 * Return: Number of bytes written to user buffer or an error value
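 *
 * A minimal user-space sketch of consuming this value (the sysfs path, pid
 * 1234 and gpu id 1002 below are illustrative assumptions based on the
 * procfs layout created later in this file, not taken from it verbatim):
 *
 *   char buf[16];
 *   int fd = open("/sys/class/kfd/kfd/proc/1234/stats_1002/cu_occupancy",
 *                 O_RDONLY);
 *   if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *           printf("occupied CUs: %s", buf);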
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
269 int max_waves_per_cu;
270 struct kfd_node *dev = NULL;
271 struct kfd_process *proc = NULL;
272 struct kfd_process_device *pdd = NULL;
274 pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
276 if (dev->kfd2kgd->get_cu_occupancy == NULL)
281 if (pdd->qpd.queue_count == 0) {
282 pr_debug("Gpu-Id: %d has no active queues for process %d\n",
283 dev->id, proc->pasid);
284 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
287 /* Collect wave count from the device if it supports it */
289 max_waves_per_cu = 0;
290 dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt,
291 &max_waves_per_cu, 0);
293 /* Translate wave count to number of compute units */
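/* Round-up (ceiling) division: e.g. wave_cnt = 100 and max_waves_per_cu = 32
 * yield cu_cnt = (100 + 31) / 32 = 4 (illustrative numbers).
 */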
294 cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
295 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
298 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
301 if (strcmp(attr->name, "pasid") == 0) {
302 struct kfd_process *p = container_of(attr, struct kfd_process,
305 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
306 } else if (strncmp(attr->name, "vram_", 5) == 0) {
307 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
309 return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
310 } else if (strncmp(attr->name, "sdma_", 5) == 0) {
311 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
313 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
315 INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
316 kfd_sdma_activity_worker);
318 sdma_activity_work_handler.pdd = pdd;
319 sdma_activity_work_handler.sdma_activity_counter = 0;
321 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
323 flush_work(&sdma_activity_work_handler.sdma_activity_work);
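/* The work item above lives on this function's stack; flush_work() guarantees
 * kfd_sdma_activity_worker() has finished with it before the stack frame is
 * reused.
 */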
325 return snprintf(buffer, PAGE_SIZE, "%llu\n",
326 (sdma_activity_work_handler.sdma_activity_counter)/
327 SDMA_ACTIVITY_DIVISOR);
329 pr_err("Invalid attribute");
336 static void kfd_procfs_kobj_release(struct kobject *kobj)
341 static const struct sysfs_ops kfd_procfs_ops = {
342 .show = kfd_procfs_show,
345 static const struct kobj_type procfs_type = {
346 .release = kfd_procfs_kobj_release,
347 .sysfs_ops = &kfd_procfs_ops,
350 void kfd_procfs_init(void)
354 procfs.kobj = kfd_alloc_struct(procfs.kobj);
358 ret = kobject_init_and_add(procfs.kobj, &procfs_type,
359 &kfd_device->kobj, "proc");
361 pr_warn("Could not create procfs proc folder");
362 /* If we fail to create the procfs, clean up */
363 kfd_procfs_shutdown();
367 void kfd_procfs_shutdown(void)
370 kobject_del(procfs.kobj);
371 kobject_put(procfs.kobj);
376 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
377 struct attribute *attr, char *buffer)
379 struct queue *q = container_of(kobj, struct queue, kobj);
381 if (!strcmp(attr->name, "size"))
382 return snprintf(buffer, PAGE_SIZE, "%llu",
383 q->properties.queue_size);
384 else if (!strcmp(attr->name, "type"))
385 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
386 else if (!strcmp(attr->name, "gpuid"))
387 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
389 pr_err("Invalid attribute");
394 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
395 struct attribute *attr, char *buffer)
397 if (strcmp(attr->name, "evicted_ms") == 0) {
398 struct kfd_process_device *pdd = container_of(attr,
399 struct kfd_process_device,
401 uint64_t evict_jiffies;
403 evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
405 return snprintf(buffer,
408 jiffies64_to_msecs(evict_jiffies));
410 /* Sysfs handle that gets CU occupancy is per device */
411 } else if (strcmp(attr->name, "cu_occupancy") == 0) {
412 return kfd_get_cu_occupancy(attr, buffer);
414 pr_err("Invalid attribute");
420 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
421 struct attribute *attr, char *buf)
423 struct kfd_process_device *pdd;
425 if (!strcmp(attr->name, "faults")) {
426 pdd = container_of(attr, struct kfd_process_device,
428 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
430 if (!strcmp(attr->name, "page_in")) {
431 pdd = container_of(attr, struct kfd_process_device,
433 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
435 if (!strcmp(attr->name, "page_out")) {
436 pdd = container_of(attr, struct kfd_process_device,
438 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
443 static struct attribute attr_queue_size = {
445 .mode = KFD_SYSFS_FILE_MODE
448 static struct attribute attr_queue_type = {
450 .mode = KFD_SYSFS_FILE_MODE
453 static struct attribute attr_queue_gpuid = {
455 .mode = KFD_SYSFS_FILE_MODE
458 static struct attribute *procfs_queue_attrs[] = {
464 ATTRIBUTE_GROUPS(procfs_queue);
466 static const struct sysfs_ops procfs_queue_ops = {
467 .show = kfd_procfs_queue_show,
470 static const struct kobj_type procfs_queue_type = {
471 .sysfs_ops = &procfs_queue_ops,
472 .default_groups = procfs_queue_groups,
475 static const struct sysfs_ops procfs_stats_ops = {
476 .show = kfd_procfs_stats_show,
479 static const struct kobj_type procfs_stats_type = {
480 .sysfs_ops = &procfs_stats_ops,
481 .release = kfd_procfs_kobj_release,
484 static const struct sysfs_ops sysfs_counters_ops = {
485 .show = kfd_sysfs_counters_show,
488 static const struct kobj_type sysfs_counters_type = {
489 .sysfs_ops = &sysfs_counters_ops,
490 .release = kfd_procfs_kobj_release,
493 int kfd_procfs_add_queue(struct queue *q)
495 struct kfd_process *proc;
498 if (!q || !q->process)
502 /* Create proc/<pid>/queues/<queue id> folder */
503 if (!proc->kobj_queues)
505 ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
506 proc->kobj_queues, "%u", q->properties.queue_id);
508 pr_warn("Creating proc/<pid>/queues/%u failed",
509 q->properties.queue_id);
510 kobject_put(&q->kobj);
517 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
522 if (!kobj || !attr || !name)
526 attr->mode = KFD_SYSFS_FILE_MODE;
527 sysfs_attr_init(attr);
529 ret = sysfs_create_file(kobj, attr);
531 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
534 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
538 char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
544 * Create sysfs files for each GPU:
545 * - proc/<pid>/stats_<gpuid>/
546 * - proc/<pid>/stats_<gpuid>/evicted_ms
547 * - proc/<pid>/stats_<gpuid>/cu_occupancy
549 for (i = 0; i < p->n_pdds; i++) {
550 struct kfd_process_device *pdd = p->pdds[i];
552 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
553 "stats_%u", pdd->dev->id);
554 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
555 if (!pdd->kobj_stats)
558 ret = kobject_init_and_add(pdd->kobj_stats,
564 pr_warn("Creating KFD proc/stats_%s folder failed",
566 kobject_put(pdd->kobj_stats);
567 pdd->kobj_stats = NULL;
571 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
573 /* Add sysfs file to report compute unit occupancy */
574 if (pdd->dev->kfd2kgd->get_cu_occupancy)
575 kfd_sysfs_create_file(pdd->kobj_stats,
576 &pdd->attr_cu_occupancy,
581 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
585 char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
591 * Create sysfs files for each GPU which supports SVM
592 * - proc/<pid>/counters_<gpuid>/
593 * - proc/<pid>/counters_<gpuid>/faults
594 * - proc/<pid>/counters_<gpuid>/page_in
595 * - proc/<pid>/counters_<gpuid>/page_out
597 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
598 struct kfd_process_device *pdd = p->pdds[i];
599 struct kobject *kobj_counters;
601 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
602 "counters_%u", pdd->dev->id);
603 kobj_counters = kfd_alloc_struct(kobj_counters);
607 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
608 p->kobj, counters_dir_filename);
610 pr_warn("Creating KFD proc/%s folder failed",
611 counters_dir_filename);
612 kobject_put(kobj_counters);
616 pdd->kobj_counters = kobj_counters;
617 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
619 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
621 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
626 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
634 * Create sysfs files for each GPU:
635 * - proc/<pid>/vram_<gpuid>
636 * - proc/<pid>/sdma_<gpuid>
638 for (i = 0; i < p->n_pdds; i++) {
639 struct kfd_process_device *pdd = p->pdds[i];
641 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
643 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
646 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
648 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
653 void kfd_procfs_del_queue(struct queue *q)
658 kobject_del(&q->kobj);
659 kobject_put(&q->kobj);
662 int kfd_process_create_wq(void)
665 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
667 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
670 if (!kfd_process_wq || !kfd_restore_wq) {
671 kfd_process_destroy_wq();
678 void kfd_process_destroy_wq(void)
680 if (kfd_process_wq) {
681 destroy_workqueue(kfd_process_wq);
682 kfd_process_wq = NULL;
684 if (kfd_restore_wq) {
685 destroy_workqueue(kfd_restore_wq);
686 kfd_restore_wq = NULL;
690 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
691 struct kfd_process_device *pdd, void **kptr)
693 struct kfd_node *dev = pdd->dev;
696 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
700 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
701 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
705 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
706 * This function should only be called right after the process
707 * is created and when kfd_processes_mutex is still being held
708 * to avoid concurrency. Because of that exclusiveness, we do
709 * not need to take p->mutex.
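 * In this file the helper is used (see below) to back the per-device IB
 * buffer and the dGPU CWSR trap handler area with GTT memory that is both
 * GPU-mapped and kernel-mapped.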
711 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
712 uint64_t gpu_va, uint32_t size,
713 uint32_t flags, struct kgd_mem **mem, void **kptr)
715 struct kfd_node *kdev = pdd->dev;
718 err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
719 pdd->drm_priv, mem, NULL,
724 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
729 err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
731 pr_debug("Sync memory failed, wait interrupted by user signal\n");
732 goto sync_memory_failed;
736 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
737 (struct kgd_mem *)*mem, kptr, NULL);
739 pr_debug("Map GTT BO to kernel failed\n");
740 goto sync_memory_failed;
747 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
750 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
758 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
759 * process for IB usage. The memory reserved is for KFD to submit
760 * IBs to AMDGPU from the kernel. If the memory is reserved
761 * successfully, ib_kaddr will have the CPU/kernel
762 * address. Check ib_kaddr before accessing the memory.
764 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
766 struct qcm_process_device *qpd = &pdd->qpd;
767 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
768 KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
769 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
770 KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
775 if (qpd->ib_kaddr || !qpd->ib_base)
778 /* ib_base is only set for dGPU */
779 ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
785 qpd->ib_kaddr = kaddr;
790 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
792 struct qcm_process_device *qpd = &pdd->qpd;
794 if (!qpd->ib_kaddr || !qpd->ib_base)
797 kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
800 struct kfd_process *kfd_create_process(struct task_struct *thread)
802 struct kfd_process *process;
805 if (!(thread->mm && mmget_not_zero(thread->mm)))
806 return ERR_PTR(-EINVAL);
808 /* Only the pthreads threading model is supported. */
809 if (thread->group_leader->mm != thread->mm) {
811 return ERR_PTR(-EINVAL);
815 * take the kfd processes mutex before starting process creation
816 * so there won't be a case where two threads of the same process
817 * create two kfd_process structures
819 mutex_lock(&kfd_processes_mutex);
821 if (kfd_is_locked()) {
822 pr_debug("KFD is locked! Cannot create process");
823 process = ERR_PTR(-EINVAL);
827 /* A prior open of /dev/kfd could have already created the process. */
828 process = find_process(thread, false);
830 pr_debug("Process already found\n");
832 process = create_process(thread);
839 process->kobj = kfd_alloc_struct(process->kobj);
840 if (!process->kobj) {
841 pr_warn("Creating procfs kobject failed");
844 ret = kobject_init_and_add(process->kobj, &procfs_type,
846 (int)process->lead_thread->pid);
848 pr_warn("Creating procfs pid directory failed");
849 kobject_put(process->kobj);
853 kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
856 process->kobj_queues = kobject_create_and_add("queues",
858 if (!process->kobj_queues)
859 pr_warn("Creating KFD proc/queues folder failed");
861 kfd_procfs_add_sysfs_stats(process);
862 kfd_procfs_add_sysfs_files(process);
863 kfd_procfs_add_sysfs_counters(process);
865 init_waitqueue_head(&process->wait_irq_drain);
868 if (!IS_ERR(process))
869 kref_get(&process->ref);
870 mutex_unlock(&kfd_processes_mutex);
876 struct kfd_process *kfd_get_process(const struct task_struct *thread)
878 struct kfd_process *process;
881 return ERR_PTR(-EINVAL);
883 /* Only the pthreads threading model is supported. */
884 if (thread->group_leader->mm != thread->mm)
885 return ERR_PTR(-EINVAL);
887 process = find_process(thread, false);
889 return ERR_PTR(-EINVAL);
894 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
896 struct kfd_process *process;
898 hash_for_each_possible_rcu(kfd_processes_table, process,
899 kfd_processes, (uintptr_t)mm)
900 if (process->mm == mm)
906 static struct kfd_process *find_process(const struct task_struct *thread,
909 struct kfd_process *p;
912 idx = srcu_read_lock(&kfd_processes_srcu);
913 p = find_process_by_mm(thread->mm);
916 srcu_read_unlock(&kfd_processes_srcu, idx);
921 void kfd_unref_process(struct kfd_process *p)
923 kref_put(&p->ref, kfd_process_ref_release);
926 /* This increments the process->ref counter. */
927 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
929 struct task_struct *task = NULL;
930 struct kfd_process *p = NULL;
934 get_task_struct(task);
936 task = get_pid_task(pid, PIDTYPE_PID);
940 p = find_process(task, true);
941 put_task_struct(task);
947 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
949 struct kfd_process *p = pdd->process;
955 * Remove all handles from idr and release appropriate
956 * local memory object
958 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
960 for (i = 0; i < p->n_pdds; i++) {
961 struct kfd_process_device *peer_pdd = p->pdds[i];
963 if (!peer_pdd->drm_priv)
965 amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
966 peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
969 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
970 pdd->drm_priv, NULL);
971 kfd_process_device_remove_obj_handle(pdd, id);
976 * Just kunmap and unpin signal BO here. It will be freed in
977 * kfd_process_free_outstanding_kfd_bos()
979 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
981 struct kfd_process_device *pdd;
982 struct kfd_node *kdev;
985 kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
989 mutex_lock(&p->mutex);
991 pdd = kfd_get_process_device_data(kdev, p);
995 mem = kfd_process_device_translate_handle(
996 pdd, GET_IDR_HANDLE(p->signal_handle));
1000 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1003 mutex_unlock(&p->mutex);
1006 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1010 for (i = 0; i < p->n_pdds; i++)
1011 kfd_process_device_free_bos(p->pdds[i]);
1014 static void kfd_process_destroy_pdds(struct kfd_process *p)
1018 for (i = 0; i < p->n_pdds; i++) {
1019 struct kfd_process_device *pdd = p->pdds[i];
1021 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1022 pdd->dev->id, p->pasid);
1024 kfd_process_device_destroy_cwsr_dgpu(pdd);
1025 kfd_process_device_destroy_ib_mem(pdd);
1027 if (pdd->drm_file) {
1028 amdgpu_amdkfd_gpuvm_release_process_vm(
1029 pdd->dev->adev, pdd->drm_priv);
1030 fput(pdd->drm_file);
1033 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1034 free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1035 get_order(KFD_CWSR_TBA_TMA_SIZE));
1037 idr_destroy(&pdd->alloc_idr);
1039 kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1041 if (pdd->dev->kfd->shared_resources.enable_mes)
1042 amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1045 * before destroying pdd, make sure to report availability for auto suspend
1048 if (pdd->runtime_inuse) {
1049 pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1050 pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1051 pdd->runtime_inuse = false;
1060 static void kfd_process_remove_sysfs(struct kfd_process *p)
1062 struct kfd_process_device *pdd;
1068 sysfs_remove_file(p->kobj, &p->attr_pasid);
1069 kobject_del(p->kobj_queues);
1070 kobject_put(p->kobj_queues);
1071 p->kobj_queues = NULL;
1073 for (i = 0; i < p->n_pdds; i++) {
1076 sysfs_remove_file(p->kobj, &pdd->attr_vram);
1077 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1079 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1080 if (pdd->dev->kfd2kgd->get_cu_occupancy)
1081 sysfs_remove_file(pdd->kobj_stats,
1082 &pdd->attr_cu_occupancy);
1083 kobject_del(pdd->kobj_stats);
1084 kobject_put(pdd->kobj_stats);
1085 pdd->kobj_stats = NULL;
1088 for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1091 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1092 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1093 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1094 kobject_del(pdd->kobj_counters);
1095 kobject_put(pdd->kobj_counters);
1096 pdd->kobj_counters = NULL;
1099 kobject_del(p->kobj);
1100 kobject_put(p->kobj);
1104 /* No process locking is needed in this function, because the process
1105 * is not findable any more. We must assume that no other thread is
1106 * using it any more, otherwise we couldn't safely free the process
1107 * structure in the end.
1109 static void kfd_process_wq_release(struct work_struct *work)
1111 struct kfd_process *p = container_of(work, struct kfd_process,
1113 struct dma_fence *ef;
1115 kfd_process_dequeue_from_all_devices(p);
1116 pqm_uninit(&p->pqm);
1118 /* Signal the eviction fence after user mode queues are
1119 * destroyed. This allows any BOs to be freed without
1120 * triggering pointless evictions or waiting for fences.
1123 ef = rcu_access_pointer(p->ef);
1124 dma_fence_signal(ef);
1126 kfd_process_remove_sysfs(p);
1128 kfd_process_kunmap_signal_bo(p);
1129 kfd_process_free_outstanding_kfd_bos(p);
1130 svm_range_list_fini(p);
1132 kfd_process_destroy_pdds(p);
1135 kfd_event_free_process(p);
1137 kfd_pasid_free(p->pasid);
1138 mutex_destroy(&p->mutex);
1140 put_task_struct(p->lead_thread);
1145 static void kfd_process_ref_release(struct kref *ref)
1147 struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1149 INIT_WORK(&p->release_work, kfd_process_wq_release);
1150 queue_work(kfd_process_wq, &p->release_work);
1153 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1155 int idx = srcu_read_lock(&kfd_processes_srcu);
1156 struct kfd_process *p = find_process_by_mm(mm);
1158 srcu_read_unlock(&kfd_processes_srcu, idx);
1160 return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1163 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1165 kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1168 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1172 cancel_delayed_work_sync(&p->eviction_work);
1173 cancel_delayed_work_sync(&p->restore_work);
1175 for (i = 0; i < p->n_pdds; i++) {
1176 struct kfd_process_device *pdd = p->pdds[i];
1178 /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1179 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1180 amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1183 /* Indicate to other users that MM is no longer valid */
1185 kfd_dbg_trap_disable(p);
1187 if (atomic_read(&p->debugged_process_count) > 0) {
1188 struct kfd_process *target;
1190 int idx = srcu_read_lock(&kfd_processes_srcu);
1192 hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1193 if (target->debugger_process && target->debugger_process == p) {
1194 mutex_lock_nested(&target->mutex, 1);
1195 kfd_dbg_trap_disable(target);
1196 mutex_unlock(&target->mutex);
1197 if (atomic_read(&p->debugged_process_count) == 0)
1202 srcu_read_unlock(&kfd_processes_srcu, idx);
1205 mmu_notifier_put(&p->mmu_notifier);
1208 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1209 struct mm_struct *mm)
1211 struct kfd_process *p;
1214 * The kfd_process structure cannot be freed because the
1215 * mmu_notifier srcu is read locked
1217 p = container_of(mn, struct kfd_process, mmu_notifier);
1218 if (WARN_ON(p->mm != mm))
1221 mutex_lock(&kfd_processes_mutex);
1223 * Return early if the table is empty.
1225 * This could potentially happen if this function is called concurrently
1226 * by mmu_notifier and by kfd_cleanup_processes.
1229 if (hash_empty(kfd_processes_table)) {
1230 mutex_unlock(&kfd_processes_mutex);
1233 hash_del_rcu(&p->kfd_processes);
1234 mutex_unlock(&kfd_processes_mutex);
1235 synchronize_srcu(&kfd_processes_srcu);
1237 kfd_process_notifier_release_internal(p);
1240 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1241 .release = kfd_process_notifier_release,
1242 .alloc_notifier = kfd_process_alloc_notifier,
1243 .free_notifier = kfd_process_free_notifier,
1247 * This code handles the case when the driver is being unloaded before all
1248 * mm_structs are released. We need to safely free the kfd_process and
1249 * avoid race conditions with mmu_notifier that might try to free them.
1252 void kfd_cleanup_processes(void)
1254 struct kfd_process *p;
1255 struct hlist_node *p_temp;
1257 HLIST_HEAD(cleanup_list);
1260 * Move all remaining kfd_process from the process table to a
1261 * temp list for processing. Once done, the callback from mmu_notifier
1262 * release will not see the kfd_process in the table and will return early,
1263 * avoiding double-free issues.
1265 mutex_lock(&kfd_processes_mutex);
1266 hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1267 hash_del_rcu(&p->kfd_processes);
1268 synchronize_srcu(&kfd_processes_srcu);
1269 hlist_add_head(&p->kfd_processes, &cleanup_list);
1271 mutex_unlock(&kfd_processes_mutex);
1273 hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1274 kfd_process_notifier_release_internal(p);
1277 * Ensure that all outstanding free_notifier callbacks get called, triggering
1278 * the release of the kfd_process struct.
1280 mmu_notifier_synchronize();
1283 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1285 unsigned long offset;
1291 for (i = 0; i < p->n_pdds; i++) {
1292 struct kfd_node *dev = p->pdds[i]->dev;
1293 struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1295 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1298 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1299 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1300 KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1301 MAP_SHARED, offset);
1303 if (IS_ERR_VALUE(qpd->tba_addr)) {
1304 int err = qpd->tba_addr;
1306 pr_err("Failure to set tba address. error %d.\n", err);
1308 qpd->cwsr_kaddr = NULL;
1312 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1314 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1316 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1317 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1318 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1326 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1328 struct kfd_node *dev = pdd->dev;
1329 struct qcm_process_device *qpd = &pdd->qpd;
1330 uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1331 | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1332 | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1333 struct kgd_mem *mem;
1337 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1340 /* cwsr_base is only set for dGPU */
1341 ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1342 KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1346 qpd->cwsr_mem = mem;
1347 qpd->cwsr_kaddr = kaddr;
1348 qpd->tba_addr = qpd->cwsr_base;
1350 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1352 kfd_process_set_trap_debug_flag(&pdd->qpd,
1353 pdd->process->debug_trap_enabled);
1355 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1356 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1357 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1362 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1364 struct kfd_node *dev = pdd->dev;
1365 struct qcm_process_device *qpd = &pdd->qpd;
1367 if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1370 kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1373 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1377 if (qpd->cwsr_kaddr) {
1378 /* KFD trap handler is bound, record as second-level TBA/TMA
1379 * in first-level TMA. First-level trap will jump to second.
1382 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1386 /* No trap handler bound, bind as first-level TBA/TMA. */
1387 qpd->tba_addr = tba_addr;
1388 qpd->tma_addr = tma_addr;
1392 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1396 /* On most GFXv9 GPUs, the retry mode in the SQ must match the
1397 * boot time retry setting. Mixing processes with different
1398 * XNACK/retry settings can hang the GPU.
1400 * Different GPUs can have different noretry settings depending
1401 * on HW bugs or limitations. We need to find at least one
1402 * XNACK mode for this process that's compatible with all GPUs.
1403 * Fortunately GPUs with retry enabled (noretry=0) can run code
1404 * built for XNACK-off. On GFXv9 it may perform slower.
1406 * Therefore applications built for XNACK-off can always be
1407 * supported and will be our fallback if any GPU does not support retry.
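 *
 * For example (illustrative): if a process requests XNACK-on on a system
 * that also contains a GFXv10+ GPU, or a GPU booted with noretry=1, the
 * request is denied below and the process falls back to XNACK-off, which
 * every GPU can execute.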
1410 for (i = 0; i < p->n_pdds; i++) {
1411 struct kfd_node *dev = p->pdds[i]->dev;
1413 /* Only consider GFXv9 and higher GPUs. Older GPUs don't
1414 * support the SVM APIs and don't need to be considered
1415 * for the XNACK mode selection.
1417 if (!KFD_IS_SOC15(dev))
1419 /* Aldebaran can always support XNACK because it can support
1420 * per-process XNACK mode selection. But let the dev->noretry
1421 * setting still influence the default XNACK mode.
1423 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
1424 if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
1425 pr_debug("SRIOV platform xnack not supported\n");
1431 /* GFXv10 and later GPUs do not support shader preemption
1432 * during page faults. This can lead to poor QoS for queue
1433 * management and memory-manager-related preemptions or even deadlocks.
1436 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1439 if (dev->kfd->noretry)
1446 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1449 if (qpd->cwsr_kaddr) {
1451 (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1457 * On return the kfd_process is fully operational and will be freed when the mm is released.
1460 static struct kfd_process *create_process(const struct task_struct *thread)
1462 struct kfd_process *process;
1463 struct mmu_notifier *mn;
1466 process = kzalloc(sizeof(*process), GFP_KERNEL);
1468 goto err_alloc_process;
1470 kref_init(&process->ref);
1471 mutex_init(&process->mutex);
1472 process->mm = thread->mm;
1473 process->lead_thread = thread->group_leader;
1474 process->n_pdds = 0;
1475 process->queues_paused = false;
1476 INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1477 INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1478 process->last_restore_timestamp = get_jiffies_64();
1479 err = kfd_event_init_process(process);
1481 goto err_event_init;
1482 process->is_32bit_user_mode = in_compat_syscall();
1483 process->debug_trap_enabled = false;
1484 process->debugger_process = NULL;
1485 process->exception_enable_mask = 0;
1486 atomic_set(&process->debugged_process_count, 0);
1487 sema_init(&process->runtime_enable_sema, 0);
1489 process->pasid = kfd_pasid_alloc();
1490 if (process->pasid == 0) {
1492 goto err_alloc_pasid;
1495 err = pqm_init(&process->pqm, process);
1497 goto err_process_pqm_init;
1499 /* init process apertures */
1500 err = kfd_init_apertures(process);
1502 goto err_init_apertures;
1504 /* Check XNACK support after PDDs are created in kfd_init_apertures */
1505 process->xnack_enabled = kfd_process_xnack_mode(process, false);
1507 err = svm_range_list_init(process);
1509 goto err_init_svm_range_list;
1511 /* alloc_notifier needs to find the process in the hash table */
1512 hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1513 (uintptr_t)process->mm);
1515 /* Prevent free_notifier from starting kfd_process_wq_release if
1516 * mmu_notifier_get fails because of a pending signal.
1518 kref_get(&process->ref);
1520 /* MMU notifier registration must be the last call that can fail
1521 * because after this point we cannot unwind the process creation.
1522 * After this point, mmu_notifier_put will trigger the cleanup by
1523 * dropping the last process reference in the free_notifier.
1525 mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1528 goto err_register_notifier;
1530 BUG_ON(mn != &process->mmu_notifier);
1532 kfd_unref_process(process);
1533 get_task_struct(process->lead_thread);
1535 INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1539 err_register_notifier:
1540 hash_del_rcu(&process->kfd_processes);
1541 svm_range_list_fini(process);
1542 err_init_svm_range_list:
1543 kfd_process_free_outstanding_kfd_bos(process);
1544 kfd_process_destroy_pdds(process);
1546 pqm_uninit(&process->pqm);
1547 err_process_pqm_init:
1548 kfd_pasid_free(process->pasid);
1550 kfd_event_free_process(process);
1552 mutex_destroy(&process->mutex);
1555 return ERR_PTR(err);
1558 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1559 struct kfd_process *p)
1563 for (i = 0; i < p->n_pdds; i++)
1564 if (p->pdds[i]->dev == dev)
1570 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1571 struct kfd_process *p)
1573 struct kfd_process_device *pdd = NULL;
1576 if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1578 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1583 INIT_LIST_HEAD(&pdd->qpd.queues_list);
1584 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1585 pdd->qpd.dqm = dev->dqm;
1586 pdd->qpd.pqm = &p->pqm;
1587 pdd->qpd.evicted = 0;
1588 pdd->qpd.mapped_gws_queue = false;
1590 pdd->bound = PDD_UNBOUND;
1591 pdd->already_dequeued = false;
1592 pdd->runtime_inuse = false;
1593 pdd->vram_usage = 0;
1594 pdd->sdma_past_activity_counter = 0;
1595 pdd->user_gpu_id = dev->id;
1596 atomic64_set(&pdd->evict_duration_counter, 0);
1598 if (dev->kfd->shared_resources.enable_mes) {
1599 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev,
1600 AMDGPU_MES_PROC_CTX_SIZE,
1602 &pdd->proc_ctx_gpu_addr,
1603 &pdd->proc_ctx_cpu_ptr,
1606 pr_err("failed to allocate process context bo\n");
1609 memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
1612 p->pdds[p->n_pdds++] = pdd;
1613 if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1614 pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1619 /* Init idr used for memory handle translation */
1620 idr_init(&pdd->alloc_idr);
1630 * kfd_process_device_init_vm - Initialize a VM for a process-device
1632 * @pdd: The process-device
1633 * @drm_file: Optional pointer to a DRM file descriptor
1635 * If @drm_file is specified, it will be used to acquire the VM from
1636 * that file descriptor. If successful, the @pdd takes ownership of
1637 * the file descriptor.
1639 * If @drm_file is NULL, a new VM is created.
1641 * Returns 0 on success, -errno on failure.
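 *
 * Typical caller (an assumption about the ioctl path, not shown in this
 * file): the AMDKFD_IOC_ACQUIRE_VM handler obtains the DRM render-node file
 * with fget(drm_fd) and passes it here; since the pdd takes ownership on
 * success, the caller only fput()s the file on failure.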
1643 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1644 struct file *drm_file)
1646 struct amdgpu_fpriv *drv_priv;
1647 struct amdgpu_vm *avm;
1648 struct kfd_process *p;
1649 struct dma_fence *ef;
1650 struct kfd_node *dev;
1659 ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1662 avm = &drv_priv->vm;
1667 ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1668 &p->kgd_process_info,
1671 pr_err("Failed to create process VM object\n");
1674 RCU_INIT_POINTER(p->ef, ef);
1675 pdd->drm_priv = drm_file->private_data;
1677 ret = kfd_process_device_reserve_ib_mem(pdd);
1679 goto err_reserve_ib_mem;
1680 ret = kfd_process_device_init_cwsr_dgpu(pdd);
1684 ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1688 pdd->drm_file = drm_file;
1693 kfd_process_device_destroy_cwsr_dgpu(pdd);
1695 kfd_process_device_destroy_ib_mem(pdd);
1697 pdd->drm_priv = NULL;
1698 amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1704 * Direct the IOMMU to bind the process (specifically the pasid->mm) to the device.
1706 * Unbinding occurs when the process dies or the device is removed.
1708 * Assumes that the process lock is held.
1710 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1711 struct kfd_process *p)
1713 struct kfd_process_device *pdd;
1716 pdd = kfd_get_process_device_data(dev, p);
1718 pr_err("Process device data doesn't exist\n");
1719 return ERR_PTR(-ENOMEM);
1723 return ERR_PTR(-ENODEV);
1726 * signal runtime-pm system to auto resume and prevent
1727 * further runtime suspend once device pdd is created until
1730 if (!pdd->runtime_inuse) {
1731 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1733 pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1734 return ERR_PTR(err);
1739 * make sure that the runtime_usage counter is incremented just once per pdd
1742 pdd->runtime_inuse = true;
1747 /* Create specific handle mapped to mem from process local memory idr
1748 * Assumes that the process lock is held.
1750 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1753 return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1756 /* Translate specific handle from process local memory idr
1757 * Assumes that the process lock is held.
1759 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1765 return idr_find(&pdd->alloc_idr, handle);
1768 /* Remove specific handle from process local memory idr
1769 * Assumes that the process lock is held.
1771 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1775 idr_remove(&pdd->alloc_idr, handle);
1778 /* This increments the process->ref counter. */
1779 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1781 struct kfd_process *p, *ret_p = NULL;
1784 int idx = srcu_read_lock(&kfd_processes_srcu);
1786 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1787 if (p->pasid == pasid) {
1794 srcu_read_unlock(&kfd_processes_srcu, idx);
1799 /* This increments the process->ref counter. */
1800 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1802 struct kfd_process *p;
1804 int idx = srcu_read_lock(&kfd_processes_srcu);
1806 p = find_process_by_mm(mm);
1810 srcu_read_unlock(&kfd_processes_srcu, idx);
1815 /* kfd_process_evict_queues - Evict all user queues of a process
1817 * Eviction is reference-counted per process-device. This means multiple
1818 * evictions from different sources can be nested safely.
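 *
 * The nesting count is kept per device by the device queue manager (see
 * qpd->evicted, initialized in kfd_create_process_device_data below), so
 * queues are only actually restarted once that count drops back to zero.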
1820 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1824 unsigned int n_evicted = 0;
1826 for (i = 0; i < p->n_pdds; i++) {
1827 struct kfd_process_device *pdd = p->pdds[i];
1829 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1832 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1834 /* evict returns -EIO if HWS is hung or the asic is resetting; in this case
1835 * we would like to set all the queues to the evicted state to prevent
1836 * them from being added back, since they are not actually saved right now.
1838 if (r && r != -EIO) {
1839 pr_err("Failed to evict process queues\n");
1849 * To keep state consistent, roll back partial eviction by restoring queues.
1851 for (i = 0; i < p->n_pdds; i++) {
1852 struct kfd_process_device *pdd = p->pdds[i];
1857 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1859 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1861 pr_err("Failed to restore queues\n");
1869 /* kfd_process_restore_queues - Restore all user queues of a process */
1870 int kfd_process_restore_queues(struct kfd_process *p)
1875 for (i = 0; i < p->n_pdds; i++) {
1876 struct kfd_process_device *pdd = p->pdds[i];
1878 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1880 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1883 pr_err("Failed to restore process queues\n");
1892 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1896 for (i = 0; i < p->n_pdds; i++)
1897 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1903 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1904 uint32_t *gpuid, uint32_t *gpuidx)
1908 for (i = 0; i < p->n_pdds; i++)
1909 if (p->pdds[i] && p->pdds[i]->dev == node) {
1910 *gpuid = p->pdds[i]->user_gpu_id;
1917 static int signal_eviction_fence(struct kfd_process *p)
1919 struct dma_fence *ef;
1923 ef = dma_fence_get_rcu_safe(&p->ef);
1926 ret = dma_fence_signal(ef);
1932 static void evict_process_worker(struct work_struct *work)
1935 struct kfd_process *p;
1936 struct delayed_work *dwork;
1938 dwork = to_delayed_work(work);
1940 /* Process termination destroys this worker thread. So during the
1941 * lifetime of this thread, kfd_process p will be valid
1943 p = container_of(dwork, struct kfd_process, eviction_work);
1945 pr_debug("Started evicting pasid 0x%x\n", p->pasid);
1946 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1948 /* If another thread already signaled the eviction fence,
1949 * they are responsible for stopping the queues and scheduling the restore work.
1952 if (!signal_eviction_fence(p))
1953 queue_delayed_work(kfd_restore_wq, &p->restore_work,
1954 msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
1956 kfd_process_restore_queues(p);
1958 pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
1960 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
1963 static int restore_process_helper(struct kfd_process *p)
1967 /* VMs may not have been acquired yet during debugging. */
1968 if (p->kgd_process_info) {
1969 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
1970 p->kgd_process_info, &p->ef);
1975 ret = kfd_process_restore_queues(p);
1977 pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
1979 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
1984 static void restore_process_worker(struct work_struct *work)
1986 struct delayed_work *dwork;
1987 struct kfd_process *p;
1990 dwork = to_delayed_work(work);
1992 /* Process termination destroys this worker thread. So during the
1993 * lifetime of this thread, kfd_process p will be valid
1995 p = container_of(dwork, struct kfd_process, restore_work);
1996 pr_debug("Started restoring pasid 0x%x\n", p->pasid);
1998 /* Setting last_restore_timestamp before successful restoration.
1999 * Otherwise this would have to be set by KGD (restore_process_bos)
2000 * before KFD BOs are unreserved. If not, the process can be evicted
2001 * again before the timestamp is set.
2002 * If restore fails, the timestamp will be set again in the next
2003 * attempt. This would mean that the minimum GPU quanta would be
2004 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two functions).
2008 p->last_restore_timestamp = get_jiffies_64();
2010 ret = restore_process_helper(p);
2012 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
2013 p->pasid, PROCESS_BACK_OFF_TIME_MS);
2014 ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
2015 msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
2016 WARN(!ret, "reschedule restore work failed\n");
2020 void kfd_suspend_all_processes(void)
2022 struct kfd_process *p;
2024 int idx = srcu_read_lock(&kfd_processes_srcu);
2026 WARN(debug_evictions, "Evicting all processes");
2027 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2028 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2029 pr_err("Failed to suspend process 0x%x\n", p->pasid);
2030 signal_eviction_fence(p);
2032 srcu_read_unlock(&kfd_processes_srcu, idx);
2035 int kfd_resume_all_processes(void)
2037 struct kfd_process *p;
2039 int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2041 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2042 if (restore_process_helper(p)) {
2043 pr_err("Restore process %d failed during resume\n",
2048 srcu_read_unlock(&kfd_processes_srcu, idx);
2052 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2053 struct vm_area_struct *vma)
2055 struct kfd_process_device *pdd;
2056 struct qcm_process_device *qpd;
2058 if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2059 pr_err("Incorrect CWSR mapping size.\n");
2063 pdd = kfd_get_process_device_data(dev, process);
2068 qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2069 get_order(KFD_CWSR_TBA_TMA_SIZE));
2070 if (!qpd->cwsr_kaddr) {
2071 pr_err("Error allocating per process CWSR buffer.\n");
2075 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2076 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2077 /* Mapping pages to user process */
2078 return remap_pfn_range(vma, vma->vm_start,
2079 PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2080 KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2083 /* Assumes the caller holds the process lock. */
2084 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2086 uint32_t irq_drain_fence[8];
2087 uint8_t node_id = 0;
2090 if (!KFD_IS_SOC15(pdd->dev))
2093 pdd->process->irq_drain_is_open = true;
2095 memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2096 irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2097 KFD_IRQ_FENCE_CLIENTID;
2098 irq_drain_fence[3] = pdd->process->pasid;
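/* The drain fence is delivered as an IH "cookie": DW0 carries the fence
 * source and client id, DW3 carries the PASID (plus, for GFX 9.4.3, the
 * node id added below).
 */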
2101 * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
2103 if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3)) {
2104 node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2105 irq_drain_fence[3] |= node_id << 16;
2108 /* Ensure stale IRQs already scheduled as KFD interrupts are drained by sending the drain fence. */
2109 if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2111 pdd->process->irq_drain_is_open = false;
2115 r = wait_event_interruptible(pdd->process->wait_irq_drain,
2116 !READ_ONCE(pdd->process->irq_drain_is_open));
2118 pdd->process->irq_drain_is_open = false;
2123 void kfd_process_close_interrupt_drain(unsigned int pasid)
2125 struct kfd_process *p;
2127 p = kfd_lookup_process_by_pasid(pasid);
2132 WRITE_ONCE(p->irq_drain_is_open, false);
2133 wake_up_all(&p->wait_irq_drain);
2134 kfd_unref_process(p);
2137 struct send_exception_work_handler_workarea {
2138 struct work_struct work;
2139 struct kfd_process *p;
2140 unsigned int queue_id;
2141 uint64_t error_reason;
2144 static void send_exception_work_handler(struct work_struct *work)
2146 struct send_exception_work_handler_workarea *workarea;
2147 struct kfd_process *p;
2149 struct mm_struct *mm;
2150 struct kfd_context_save_area_header __user *csa_header;
2151 uint64_t __user *err_payload_ptr;
2155 workarea = container_of(work,
2156 struct send_exception_work_handler_workarea,
2160 mm = get_task_mm(p->lead_thread);
2167 q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2172 csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
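/* Read the error-payload pointer out of the user-mode context save area
 * header, OR the new error reason into the payload, write it back, and then
 * signal the event whose id is stored in the same header.
 */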
2174 get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2175 get_user(cur_err, err_payload_ptr);
2176 cur_err |= workarea->error_reason;
2177 put_user(cur_err, err_payload_ptr);
2178 get_user(ev_id, &csa_header->err_event_id);
2180 kfd_set_event(p, ev_id);
2183 kthread_unuse_mm(mm);
2187 int kfd_send_exception_to_runtime(struct kfd_process *p,
2188 unsigned int queue_id,
2189 uint64_t error_reason)
2191 struct send_exception_work_handler_workarea worker;
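/* The exception is delivered from a worker so it can attach to the target
 * process mm; the work item is declared on this stack frame, flushed
 * synchronously below, and then destroyed before returning.
 */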
2193 INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2196 worker.queue_id = queue_id;
2197 worker.error_reason = error_reason;
2199 schedule_work(&worker.work);
2200 flush_work(&worker.work);
2201 destroy_work_on_stack(&worker.work);
2206 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2211 for (i = 0; i < p->n_pdds; i++) {
2212 struct kfd_process_device *pdd = p->pdds[i];
2214 if (pdd->user_gpu_id == gpu_id)
2221 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2228 for (i = 0; i < p->n_pdds; i++) {
2229 struct kfd_process_device *pdd = p->pdds[i];
2231 if (pdd->dev->id == actual_gpu_id)
2232 return pdd->user_gpu_id;
2237 #if defined(CONFIG_DEBUG_FS)
2239 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2241 struct kfd_process *p;
2245 int idx = srcu_read_lock(&kfd_processes_srcu);
2247 hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2248 seq_printf(m, "Process %d PASID 0x%x:\n",
2249 p->lead_thread->tgid, p->pasid);
2251 mutex_lock(&p->mutex);
2252 r = pqm_debugfs_mqds(m, &p->pqm);
2253 mutex_unlock(&p->mutex);
2259 srcu_read_unlock(&kfd_processes_srcu, idx);