[linux.git] drivers/gpu/drm/amd/amdkfd/kfd_process.c (Linux 6.14-rc3)
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu.h"
38
39 struct mm_struct;
40
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "kfd_smi_events.h"
45 #include "kfd_debug.h"
46
47 /*
48  * Hash table of struct kfd_process (linked via the kfd_processes field).
49  * Unique/indexed by mm_struct*
50  */
51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
52 DEFINE_MUTEX(kfd_processes_mutex);
53
54 DEFINE_SRCU(kfd_processes_srcu);
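
/*
 * Locking note (a summary of how the table is used below): lookups such as
 * find_process() walk kfd_processes_table under
 * srcu_read_lock(&kfd_processes_srcu), while insertions and removals
 * (hash_add_rcu()/hash_del_rcu()) are serialized by kfd_processes_mutex
 * and followed by synchronize_srcu() before an entry may be torn down.
 */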
55
56 /* For process termination handling */
57 static struct workqueue_struct *kfd_process_wq;
58
59 /* Ordered, single-threaded workqueue for restoring evicted
60  * processes. Restoring multiple processes concurrently under memory
61  * pressure can lead to processes blocking each other from validating
62  * their BOs and result in a live-lock situation where processes
63  * remain evicted indefinitely.
64  */
65 static struct workqueue_struct *kfd_restore_wq;
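
/*
 * Illustrative sketch (not part of the driver): an ordered workqueue runs
 * at most one work item at a time, in queueing order. With p and delay as
 * placeholder names, the pattern looks like:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_ordered_workqueue("kfd_restore_wq", WQ_FREEZABLE);
 *	if (wq)
 *		queue_delayed_work(wq, &p->restore_work, delay);
 *
 * This matches what kfd_process_create_wq() sets up below; the one-at-a-time
 * execution is what avoids the BO-validation live-lock described above.
 */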
66
67 static struct kfd_process *find_process(const struct task_struct *thread,
68                                         bool ref);
69 static void kfd_process_ref_release(struct kref *ref);
70 static struct kfd_process *create_process(const struct task_struct *thread);
71
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
74
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
76
77 struct kfd_procfs_tree {
78         struct kobject *kobj;
79 };
80
81 static struct kfd_procfs_tree procfs;
82
83 /*
84  * Structure for SDMA activity tracking
85  */
86 struct kfd_sdma_activity_handler_workarea {
87         struct work_struct sdma_activity_work;
88         struct kfd_process_device *pdd;
89         uint64_t sdma_activity_counter;
90 };
91
92 struct temp_sdma_queue_list {
93         uint64_t __user *rptr;
94         uint64_t sdma_val;
95         unsigned int queue_id;
96         struct list_head list;
97 };
98
99 static void kfd_sdma_activity_worker(struct work_struct *work)
100 {
101         struct kfd_sdma_activity_handler_workarea *workarea;
102         struct kfd_process_device *pdd;
103         uint64_t val;
104         struct mm_struct *mm;
105         struct queue *q;
106         struct qcm_process_device *qpd;
107         struct device_queue_manager *dqm;
108         int ret = 0;
109         struct temp_sdma_queue_list sdma_q_list;
110         struct temp_sdma_queue_list *sdma_q, *next;
111
112         workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
113                                 sdma_activity_work);
114
115         pdd = workarea->pdd;
116         if (!pdd)
117                 return;
118         dqm = pdd->dev->dqm;
119         qpd = &pdd->qpd;
120         if (!dqm || !qpd)
121                 return;
122         /*
123          * Total SDMA activity is current SDMA activity + past SDMA activity
124          * Past SDMA count is stored in pdd.
125          * To get the current activity counters for all active SDMA queues,
126          * we loop over all SDMA queues and get their counts from user-space.
127          *
128          * We cannot call get_user() with dqm_lock held as it can cause
129          * a circular lock dependency situation. To read the SDMA stats,
130          * we need to do the following:
131          *
132          * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
133          *    with dqm_lock/dqm_unlock().
134          * 2. Call get_user() for each node in temporary list without dqm_lock.
135          *    Save the SDMA count for each node and also add it to the total
136          *    SDMA count.
137          *    It's possible that, during this step, a few SDMA queue nodes were
138          *    deleted from the qpd->queues_list.
139          * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
140          *    If any node got deleted, its SDMA count would be captured in the sdma
141          *    past activity counter. So subtract the SDMA counter stored in step 2
142          *    for this node from the total SDMA count.
143          */
144         INIT_LIST_HEAD(&sdma_q_list.list);
145
146         /*
147          * Create the temp list of all SDMA queues
148          */
149         dqm_lock(dqm);
150
151         list_for_each_entry(q, &qpd->queues_list, list) {
152                 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153                     (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
154                         continue;
155
156                 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
157                 if (!sdma_q) {
158                         dqm_unlock(dqm);
159                         goto cleanup;
160                 }
161
162                 INIT_LIST_HEAD(&sdma_q->list);
163                 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164                 sdma_q->queue_id = q->properties.queue_id;
165                 list_add_tail(&sdma_q->list, &sdma_q_list.list);
166         }
167
168         /*
169          * If the temp list is empty, then no SDMA queue nodes were found in
170          * qpd->queues_list. Return the past activity count as the total SDMA
171          * count.
172          */
173         if (list_empty(&sdma_q_list.list)) {
174                 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
175                 dqm_unlock(dqm);
176                 return;
177         }
178
179         dqm_unlock(dqm);
180
181         /*
182          * Get the usage count for each SDMA queue in temp_list.
183          */
184         mm = get_task_mm(pdd->process->lead_thread);
185         if (!mm)
186                 goto cleanup;
187
188         kthread_use_mm(mm);
189
190         list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
191                 val = 0;
192                 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
193                 if (ret) {
194                         pr_debug("Failed to read SDMA queue active counter for queue id: %d",
195                                  sdma_q->queue_id);
196                 } else {
197                         sdma_q->sdma_val = val;
198                         workarea->sdma_activity_counter += val;
199                 }
200         }
201
202         kthread_unuse_mm(mm);
203         mmput(mm);
204
205         /*
206          * Do a second iteration over qpd->queues_list to check if any SDMA
207          * nodes were deleted while fetching the SDMA counters.
208          */
209         dqm_lock(dqm);
210
211         workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
212
213         list_for_each_entry(q, &qpd->queues_list, list) {
214                 if (list_empty(&sdma_q_list.list))
215                         break;
216
217                 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218                     (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
219                         continue;
220
221                 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222                         if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223                              (sdma_q->queue_id == q->properties.queue_id)) {
224                                 list_del(&sdma_q->list);
225                                 kfree(sdma_q);
226                                 break;
227                         }
228                 }
229         }
230
231         dqm_unlock(dqm);
232
233         /*
234          * If the temp list is not empty, it implies some queues were deleted
235          * from qpd->queues_list during the SDMA usage read. Subtract the SDMA
236          * count of each such node from the total SDMA count.
237          */
238         list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239                 workarea->sdma_activity_counter -= sdma_q->sdma_val;
240                 list_del(&sdma_q->list);
241                 kfree(sdma_q);
242         }
243
244         return;
245
246 cleanup:
247         list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248                 list_del(&sdma_q->list);
249                 kfree(sdma_q);
250         }
251 }
252
253 /**
254  * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
255  * for the current process and translate the acquired wave count into the
256  * number of compute units that are occupied.
257  *
258  * @attr: Handle of the attribute that allows reporting of the wave count. The
259  * attribute handle encapsulates the GPU device it is associated with, thereby
260  * allowing collection of waves in flight, etc.
261  * @buffer: Handle of the user-provided buffer updated with the wave count
262  *
263  * Return: Number of bytes written to user buffer or an error value
264  */
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
266 {
267         int cu_cnt;
268         int wave_cnt;
269         int max_waves_per_cu;
270         struct kfd_node *dev = NULL;
271         struct kfd_process *proc = NULL;
272         struct kfd_process_device *pdd = NULL;
273         int i;
274         struct kfd_cu_occupancy *cu_occupancy;
275         u32 queue_format;
276
277         pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
278         dev = pdd->dev;
279         if (dev->kfd2kgd->get_cu_occupancy == NULL)
280                 return -EINVAL;
281
282         cu_cnt = 0;
283         proc = pdd->process;
284         if (pdd->qpd.queue_count == 0) {
285                 pr_debug("Gpu-Id: %d has no active queues for process %d\n",
286                          dev->id, proc->pasid);
287                 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
288         }
289
290         /* Collect wave count from the device if it supports this query */
291         wave_cnt = 0;
292         max_waves_per_cu = 0;
293
294         cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
295         if (!cu_occupancy)
296                 return -ENOMEM;
297
298         /*
299          * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
300          * For AQL queues, because of cooperative dispatch we multiply the wave count
301          * by number of XCCs in the partition to get the total wave counts across all
302          * XCCs in the partition.
303          * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as is.
304          */
305         dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
306                         &max_waves_per_cu, ffs(dev->xcc_mask) - 1);
307
308         for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
309                 if (cu_occupancy[i].wave_cnt != 0 &&
310                     kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
311                                                 cu_occupancy[i].doorbell_off,
312                                                 &queue_format)) {
313                         if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
314                                 wave_cnt += cu_occupancy[i].wave_cnt;
315                         else
316                                 wave_cnt += (NUM_XCC(dev->xcc_mask) *
317                                                 cu_occupancy[i].wave_cnt);
318                 }
319         }
320
321         /* Translate wave count to number of compute units */
322         cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
323         kfree(cu_occupancy);
324         return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
325 }
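
/*
 * Worked example for the translation above (illustrative numbers only):
 * with wave_cnt = 100 waves in flight and max_waves_per_cu = 32, the
 * ceiling division (100 + 31) / 32 = 4 reports four occupied compute units.
 */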
326
327 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
328                                char *buffer)
329 {
330         if (strcmp(attr->name, "pasid") == 0) {
331                 struct kfd_process *p = container_of(attr, struct kfd_process,
332                                                      attr_pasid);
333
334                 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
335         } else if (strncmp(attr->name, "vram_", 5) == 0) {
336                 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
337                                                               attr_vram);
338                 return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
339         } else if (strncmp(attr->name, "sdma_", 5) == 0) {
340                 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
341                                                               attr_sdma);
342                 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
343
344                 INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
345                                   kfd_sdma_activity_worker);
346
347                 sdma_activity_work_handler.pdd = pdd;
348                 sdma_activity_work_handler.sdma_activity_counter = 0;
349
350                 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
351
352                 flush_work(&sdma_activity_work_handler.sdma_activity_work);
353                 destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
354
355                 return snprintf(buffer, PAGE_SIZE, "%llu\n",
356                                 (sdma_activity_work_handler.sdma_activity_counter)/
357                                  SDMA_ACTIVITY_DIVISOR);
358         } else {
359                 pr_err("Invalid attribute");
360                 return -EINVAL;
361         }
362
363         return 0;
364 }
365
366 static void kfd_procfs_kobj_release(struct kobject *kobj)
367 {
368         kfree(kobj);
369 }
370
371 static const struct sysfs_ops kfd_procfs_ops = {
372         .show = kfd_procfs_show,
373 };
374
375 static const struct kobj_type procfs_type = {
376         .release = kfd_procfs_kobj_release,
377         .sysfs_ops = &kfd_procfs_ops,
378 };
379
380 void kfd_procfs_init(void)
381 {
382         int ret = 0;
383
384         procfs.kobj = kfd_alloc_struct(procfs.kobj);
385         if (!procfs.kobj)
386                 return;
387
388         ret = kobject_init_and_add(procfs.kobj, &procfs_type,
389                                    &kfd_device->kobj, "proc");
390         if (ret) {
391                 pr_warn("Could not create procfs proc folder");
392                 /* If we fail to create the procfs, clean up */
393                 kfd_procfs_shutdown();
394         }
395 }
396
397 void kfd_procfs_shutdown(void)
398 {
399         if (procfs.kobj) {
400                 kobject_del(procfs.kobj);
401                 kobject_put(procfs.kobj);
402                 procfs.kobj = NULL;
403         }
404 }
405
406 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
407                                      struct attribute *attr, char *buffer)
408 {
409         struct queue *q = container_of(kobj, struct queue, kobj);
410
411         if (!strcmp(attr->name, "size"))
412                 return snprintf(buffer, PAGE_SIZE, "%llu",
413                                 q->properties.queue_size);
414         else if (!strcmp(attr->name, "type"))
415                 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
416         else if (!strcmp(attr->name, "gpuid"))
417                 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
418         else
419                 pr_err("Invalid attribute");
420
421         return 0;
422 }
423
424 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
425                                      struct attribute *attr, char *buffer)
426 {
427         if (strcmp(attr->name, "evicted_ms") == 0) {
428                 struct kfd_process_device *pdd = container_of(attr,
429                                 struct kfd_process_device,
430                                 attr_evict);
431                 uint64_t evict_jiffies;
432
433                 evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
434
435                 return snprintf(buffer,
436                                 PAGE_SIZE,
437                                 "%llu\n",
438                                 jiffies64_to_msecs(evict_jiffies));
439
440         /* Sysfs handle that gets CU occupancy is per device */
441         } else if (strcmp(attr->name, "cu_occupancy") == 0) {
442                 return kfd_get_cu_occupancy(attr, buffer);
443         } else {
444                 pr_err("Invalid attribute");
445         }
446
447         return 0;
448 }
449
450 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
451                                        struct attribute *attr, char *buf)
452 {
453         struct kfd_process_device *pdd;
454
455         if (!strcmp(attr->name, "faults")) {
456                 pdd = container_of(attr, struct kfd_process_device,
457                                    attr_faults);
458                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
459         }
460         if (!strcmp(attr->name, "page_in")) {
461                 pdd = container_of(attr, struct kfd_process_device,
462                                    attr_page_in);
463                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
464         }
465         if (!strcmp(attr->name, "page_out")) {
466                 pdd = container_of(attr, struct kfd_process_device,
467                                    attr_page_out);
468                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
469         }
470         return 0;
471 }
472
473 static struct attribute attr_queue_size = {
474         .name = "size",
475         .mode = KFD_SYSFS_FILE_MODE
476 };
477
478 static struct attribute attr_queue_type = {
479         .name = "type",
480         .mode = KFD_SYSFS_FILE_MODE
481 };
482
483 static struct attribute attr_queue_gpuid = {
484         .name = "gpuid",
485         .mode = KFD_SYSFS_FILE_MODE
486 };
487
488 static struct attribute *procfs_queue_attrs[] = {
489         &attr_queue_size,
490         &attr_queue_type,
491         &attr_queue_gpuid,
492         NULL
493 };
494 ATTRIBUTE_GROUPS(procfs_queue);
495
496 static const struct sysfs_ops procfs_queue_ops = {
497         .show = kfd_procfs_queue_show,
498 };
499
500 static const struct kobj_type procfs_queue_type = {
501         .sysfs_ops = &procfs_queue_ops,
502         .default_groups = procfs_queue_groups,
503 };
504
505 static const struct sysfs_ops procfs_stats_ops = {
506         .show = kfd_procfs_stats_show,
507 };
508
509 static const struct kobj_type procfs_stats_type = {
510         .sysfs_ops = &procfs_stats_ops,
511         .release = kfd_procfs_kobj_release,
512 };
513
514 static const struct sysfs_ops sysfs_counters_ops = {
515         .show = kfd_sysfs_counters_show,
516 };
517
518 static const struct kobj_type sysfs_counters_type = {
519         .sysfs_ops = &sysfs_counters_ops,
520         .release = kfd_procfs_kobj_release,
521 };
522
523 int kfd_procfs_add_queue(struct queue *q)
524 {
525         struct kfd_process *proc;
526         int ret;
527
528         if (!q || !q->process)
529                 return -EINVAL;
530         proc = q->process;
531
532         /* Create proc/<pid>/queues/<queue id> folder */
533         if (!proc->kobj_queues)
534                 return -EFAULT;
535         ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
536                         proc->kobj_queues, "%u", q->properties.queue_id);
537         if (ret < 0) {
538                 pr_warn("Creating proc/<pid>/queues/%u failed",
539                         q->properties.queue_id);
540                 kobject_put(&q->kobj);
541                 return ret;
542         }
543
544         return 0;
545 }
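
/*
 * The resulting per-queue layout under the process directory (the
 * /sys/class/kfd/kfd/proc/<pid>/ root is an assumption of this note and
 * depends on where the kfd device is registered) is:
 *
 *	queues/<queue id>/size	- queue size in bytes
 *	queues/<queue id>/type	- numeric KFD queue type
 *	queues/<queue id>/gpuid	- id of the GPU the queue belongs to
 *
 * See kfd_procfs_queue_show() above for the exact formatting.
 */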
546
547 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
548                                  char *name)
549 {
550         int ret;
551
552         if (!kobj || !attr || !name)
553                 return;
554
555         attr->name = name;
556         attr->mode = KFD_SYSFS_FILE_MODE;
557         sysfs_attr_init(attr);
558
559         ret = sysfs_create_file(kobj, attr);
560         if (ret)
561                 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
562 }
563
564 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
565 {
566         int ret;
567         int i;
568         char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
569
570         if (!p || !p->kobj)
571                 return;
572
573         /*
574          * Create sysfs files for each GPU:
575          * - proc/<pid>/stats_<gpuid>/
576          * - proc/<pid>/stats_<gpuid>/evicted_ms
577          * - proc/<pid>/stats_<gpuid>/cu_occupancy
578          */
579         for (i = 0; i < p->n_pdds; i++) {
580                 struct kfd_process_device *pdd = p->pdds[i];
581
582                 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
583                                 "stats_%u", pdd->dev->id);
584                 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
585                 if (!pdd->kobj_stats)
586                         return;
587
588                 ret = kobject_init_and_add(pdd->kobj_stats,
589                                            &procfs_stats_type,
590                                            p->kobj,
591                                            stats_dir_filename);
592
593                 if (ret) {
594                         pr_warn("Creating KFD proc/stats_%s folder failed",
595                                 stats_dir_filename);
596                         kobject_put(pdd->kobj_stats);
597                         pdd->kobj_stats = NULL;
598                         return;
599                 }
600
601                 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
602                                       "evicted_ms");
603                 /* Add sysfs file to report compute unit occupancy */
604                 if (pdd->dev->kfd2kgd->get_cu_occupancy)
605                         kfd_sysfs_create_file(pdd->kobj_stats,
606                                               &pdd->attr_cu_occupancy,
607                                               "cu_occupancy");
608         }
609 }
610
611 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
612 {
613         int ret = 0;
614         int i;
615         char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
616
617         if (!p || !p->kobj)
618                 return;
619
620         /*
621          * Create sysfs files for each GPU that supports SVM:
622          * - proc/<pid>/counters_<gpuid>/
623          * - proc/<pid>/counters_<gpuid>/faults
624          * - proc/<pid>/counters_<gpuid>/page_in
625          * - proc/<pid>/counters_<gpuid>/page_out
626          */
627         for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
628                 struct kfd_process_device *pdd = p->pdds[i];
629                 struct kobject *kobj_counters;
630
631                 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
632                         "counters_%u", pdd->dev->id);
633                 kobj_counters = kfd_alloc_struct(kobj_counters);
634                 if (!kobj_counters)
635                         return;
636
637                 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
638                                            p->kobj, counters_dir_filename);
639                 if (ret) {
640                         pr_warn("Creating KFD proc/%s folder failed",
641                                 counters_dir_filename);
642                         kobject_put(kobj_counters);
643                         return;
644                 }
645
646                 pdd->kobj_counters = kobj_counters;
647                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
648                                       "faults");
649                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
650                                       "page_in");
651                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
652                                       "page_out");
653         }
654 }
655
656 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
657 {
658         int i;
659
660         if (!p || !p->kobj)
661                 return;
662
663         /*
664          * Create sysfs files for each GPU:
665          * - proc/<pid>/vram_<gpuid>
666          * - proc/<pid>/sdma_<gpuid>
667          */
668         for (i = 0; i < p->n_pdds; i++) {
669                 struct kfd_process_device *pdd = p->pdds[i];
670
671                 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
672                          pdd->dev->id);
673                 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
674                                       pdd->vram_filename);
675
676                 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
677                          pdd->dev->id);
678                 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
679                                             pdd->sdma_filename);
680         }
681 }
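
/*
 * Minimal user-space sketch (not part of the driver) showing how the
 * per-process files created above can be consumed. The sysfs root
 * (/sys/class/kfd/kfd/proc), the pid (1234) and the gpu id (1002) are
 * assumptions for the example; vram_<gpuid> reports VRAM usage in bytes,
 * sdma_<gpuid> the accumulated SDMA activity scaled by
 * SDMA_ACTIVITY_DIVISOR (see kfd_procfs_show()).
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long vram;
 *		char path[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/kfd/kfd/proc/%d/vram_%u", 1234, 1002u);
 *		f = fopen(path, "r");
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%llu", &vram) == 1)
 *			printf("VRAM usage: %llu bytes\n", vram);
 *		fclose(f);
 *		return 0;
 *	}
 */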
682
683 void kfd_procfs_del_queue(struct queue *q)
684 {
685         if (!q)
686                 return;
687
688         kobject_del(&q->kobj);
689         kobject_put(&q->kobj);
690 }
691
692 int kfd_process_create_wq(void)
693 {
694         if (!kfd_process_wq)
695                 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
696         if (!kfd_restore_wq)
697                 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
698                                                          WQ_FREEZABLE);
699
700         if (!kfd_process_wq || !kfd_restore_wq) {
701                 kfd_process_destroy_wq();
702                 return -ENOMEM;
703         }
704
705         return 0;
706 }
707
708 void kfd_process_destroy_wq(void)
709 {
710         if (kfd_process_wq) {
711                 destroy_workqueue(kfd_process_wq);
712                 kfd_process_wq = NULL;
713         }
714         if (kfd_restore_wq) {
715                 destroy_workqueue(kfd_restore_wq);
716                 kfd_restore_wq = NULL;
717         }
718 }
719
720 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
721                         struct kfd_process_device *pdd, void **kptr)
722 {
723         struct kfd_node *dev = pdd->dev;
724
725         if (kptr && *kptr) {
726                 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
727                 *kptr = NULL;
728         }
729
730         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
731         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
732                                                NULL);
733 }
734
735 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
736  *      This function should only be called right after the process
737  *      is created and while kfd_processes_mutex is still held,
738  *      to avoid concurrency. Because of that exclusivity, we do
739  *      not need to take p->mutex.
740  */
741 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
742                                    uint64_t gpu_va, uint32_t size,
743                                    uint32_t flags, struct kgd_mem **mem, void **kptr)
744 {
745         struct kfd_node *kdev = pdd->dev;
746         int err;
747
748         err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
749                                                  pdd->drm_priv, mem, NULL,
750                                                  flags, false);
751         if (err)
752                 goto err_alloc_mem;
753
754         err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
755                         pdd->drm_priv);
756         if (err)
757                 goto err_map_mem;
758
759         err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
760         if (err) {
761                 pr_debug("Sync memory failed, wait interrupted by user signal\n");
762                 goto sync_memory_failed;
763         }
764
765         if (kptr) {
766                 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
767                                 (struct kgd_mem *)*mem, kptr, NULL);
768                 if (err) {
769                         pr_debug("Map GTT BO to kernel failed\n");
770                         goto sync_memory_failed;
771                 }
772         }
773
774         return err;
775
776 sync_memory_failed:
777         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
778
779 err_map_mem:
780         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
781                                                NULL);
782 err_alloc_mem:
783         *mem = NULL;
784         *kptr = NULL;
785         return err;
786 }
787
788 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
789  *      process for IB usage. The memory reserved is for KFD to submit
790  *      IBs to AMDGPU from the kernel.  If the memory is reserved
791  *      successfully, ib_kaddr will hold the CPU/kernel
792  *      address. Check ib_kaddr before accessing the memory.
793  */
794 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
795 {
796         struct qcm_process_device *qpd = &pdd->qpd;
797         uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
798                         KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
799                         KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
800                         KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
801         struct kgd_mem *mem;
802         void *kaddr;
803         int ret;
804
805         if (qpd->ib_kaddr || !qpd->ib_base)
806                 return 0;
807
808         /* ib_base is only set for dGPU */
809         ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
810                                       &mem, &kaddr);
811         if (ret)
812                 return ret;
813
814         qpd->ib_mem = mem;
815         qpd->ib_kaddr = kaddr;
816
817         return 0;
818 }
819
820 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
821 {
822         struct qcm_process_device *qpd = &pdd->qpd;
823
824         if (!qpd->ib_kaddr || !qpd->ib_base)
825                 return;
826
827         kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
828 }
829
830 struct kfd_process *kfd_create_process(struct task_struct *thread)
831 {
832         struct kfd_process *process;
833         int ret;
834
835         if (!(thread->mm && mmget_not_zero(thread->mm)))
836                 return ERR_PTR(-EINVAL);
837
838         /* Only the pthreads threading model is supported. */
839         if (thread->group_leader->mm != thread->mm) {
840                 mmput(thread->mm);
841                 return ERR_PTR(-EINVAL);
842         }
843
844         /*
845          * Take the kfd_processes_mutex before starting process creation,
846          * so there won't be a case where two threads of the same process
847          * create two kfd_process structures.
848          */
849         mutex_lock(&kfd_processes_mutex);
850
851         if (kfd_is_locked()) {
852                 pr_debug("KFD is locked! Cannot create process");
853                 process = ERR_PTR(-EINVAL);
854                 goto out;
855         }
856
857         /* A prior open of /dev/kfd could have already created the process.
858          * find_process will increase process kref in this case
859          */
860         process = find_process(thread, true);
861         if (process) {
862                 pr_debug("Process already found\n");
863         } else {
864                 /* If the process just called exec(3), it is possible that the
865                  * cleanup of the kfd_process (following the release of the mm
866                  * of the old process image) is still in the cleanup work queue.
867                  * Make sure to drain any job before trying to recreate any
868                  * resource for this process.
869                  */
870                 flush_workqueue(kfd_process_wq);
871
872                 process = create_process(thread);
873                 if (IS_ERR(process))
874                         goto out;
875
876                 if (!procfs.kobj)
877                         goto out;
878
879                 process->kobj = kfd_alloc_struct(process->kobj);
880                 if (!process->kobj) {
881                         pr_warn("Creating procfs kobject failed");
882                         goto out;
883                 }
884                 ret = kobject_init_and_add(process->kobj, &procfs_type,
885                                            procfs.kobj, "%d",
886                                            (int)process->lead_thread->pid);
887                 if (ret) {
888                         pr_warn("Creating procfs pid directory failed");
889                         kobject_put(process->kobj);
890                         goto out;
891                 }
892
893                 kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
894                                       "pasid");
895
896                 process->kobj_queues = kobject_create_and_add("queues",
897                                                         process->kobj);
898                 if (!process->kobj_queues)
899                         pr_warn("Creating KFD proc/queues folder failed");
900
901                 kfd_procfs_add_sysfs_stats(process);
902                 kfd_procfs_add_sysfs_files(process);
903                 kfd_procfs_add_sysfs_counters(process);
904
905                 init_waitqueue_head(&process->wait_irq_drain);
906         }
907 out:
908         mutex_unlock(&kfd_processes_mutex);
909         mmput(thread->mm);
910
911         return process;
912 }
913
914 struct kfd_process *kfd_get_process(const struct task_struct *thread)
915 {
916         struct kfd_process *process;
917
918         if (!thread->mm)
919                 return ERR_PTR(-EINVAL);
920
921         /* Only the pthreads threading model is supported. */
922         if (thread->group_leader->mm != thread->mm)
923                 return ERR_PTR(-EINVAL);
924
925         process = find_process(thread, false);
926         if (!process)
927                 return ERR_PTR(-EINVAL);
928
929         return process;
930 }
931
932 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
933 {
934         struct kfd_process *process;
935
936         hash_for_each_possible_rcu(kfd_processes_table, process,
937                                         kfd_processes, (uintptr_t)mm)
938                 if (process->mm == mm)
939                         return process;
940
941         return NULL;
942 }
943
944 static struct kfd_process *find_process(const struct task_struct *thread,
945                                         bool ref)
946 {
947         struct kfd_process *p;
948         int idx;
949
950         idx = srcu_read_lock(&kfd_processes_srcu);
951         p = find_process_by_mm(thread->mm);
952         if (p && ref)
953                 kref_get(&p->ref);
954         srcu_read_unlock(&kfd_processes_srcu, idx);
955
956         return p;
957 }
958
959 void kfd_unref_process(struct kfd_process *p)
960 {
961         kref_put(&p->ref, kfd_process_ref_release);
962 }
963
964 /* This increments the process->ref counter. */
965 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
966 {
967         struct task_struct *task = NULL;
968         struct kfd_process *p    = NULL;
969
970         if (!pid) {
971                 task = current;
972                 get_task_struct(task);
973         } else {
974                 task = get_pid_task(pid, PIDTYPE_PID);
975         }
976
977         if (task) {
978                 p = find_process(task, true);
979                 put_task_struct(task);
980         }
981
982         return p;
983 }
984
985 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
986 {
987         struct kfd_process *p = pdd->process;
988         void *mem;
989         int id;
990         int i;
991
992         /*
993          * Remove all handles from the idr and release the corresponding
994          * local memory objects
995          */
996         idr_for_each_entry(&pdd->alloc_idr, mem, id) {
997
998                 for (i = 0; i < p->n_pdds; i++) {
999                         struct kfd_process_device *peer_pdd = p->pdds[i];
1000
1001                         if (!peer_pdd->drm_priv)
1002                                 continue;
1003                         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1004                                 peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
1005                 }
1006
1007                 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
1008                                                        pdd->drm_priv, NULL);
1009                 kfd_process_device_remove_obj_handle(pdd, id);
1010         }
1011 }
1012
1013 /*
1014  * Just kunmap and unpin the signal BO here. It will be freed in
1015  * kfd_process_free_outstanding_kfd_bos()
1016  */
1017 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
1018 {
1019         struct kfd_process_device *pdd;
1020         struct kfd_node *kdev;
1021         void *mem;
1022
1023         kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
1024         if (!kdev)
1025                 return;
1026
1027         mutex_lock(&p->mutex);
1028
1029         pdd = kfd_get_process_device_data(kdev, p);
1030         if (!pdd)
1031                 goto out;
1032
1033         mem = kfd_process_device_translate_handle(
1034                 pdd, GET_IDR_HANDLE(p->signal_handle));
1035         if (!mem)
1036                 goto out;
1037
1038         amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1039
1040 out:
1041         mutex_unlock(&p->mutex);
1042 }
1043
1044 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1045 {
1046         int i;
1047
1048         for (i = 0; i < p->n_pdds; i++)
1049                 kfd_process_device_free_bos(p->pdds[i]);
1050 }
1051
1052 static void kfd_process_destroy_pdds(struct kfd_process *p)
1053 {
1054         int i;
1055
1056         for (i = 0; i < p->n_pdds; i++) {
1057                 struct kfd_process_device *pdd = p->pdds[i];
1058
1059                 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1060                                 pdd->dev->id, p->pasid);
1061
1062                 kfd_process_device_destroy_cwsr_dgpu(pdd);
1063                 kfd_process_device_destroy_ib_mem(pdd);
1064
1065                 if (pdd->drm_file) {
1066                         amdgpu_amdkfd_gpuvm_release_process_vm(
1067                                         pdd->dev->adev, pdd->drm_priv);
1068                         fput(pdd->drm_file);
1069                 }
1070
1071                 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1072                         free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1073                                 get_order(KFD_CWSR_TBA_TMA_SIZE));
1074
1075                 idr_destroy(&pdd->alloc_idr);
1076
1077                 kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1078
1079                 if (pdd->dev->kfd->shared_resources.enable_mes &&
1080                         pdd->proc_ctx_cpu_ptr)
1081                         amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1082                                                    &pdd->proc_ctx_bo);
1083                 /*
1084                  * Before destroying the pdd, make sure to report availability
1085                  * for runtime PM autosuspend.
1086                  */
1087                 if (pdd->runtime_inuse) {
1088                         pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1089                         pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1090                         pdd->runtime_inuse = false;
1091                 }
1092
1093                 kfree(pdd);
1094                 p->pdds[i] = NULL;
1095         }
1096         p->n_pdds = 0;
1097 }
1098
1099 static void kfd_process_remove_sysfs(struct kfd_process *p)
1100 {
1101         struct kfd_process_device *pdd;
1102         int i;
1103
1104         if (!p->kobj)
1105                 return;
1106
1107         sysfs_remove_file(p->kobj, &p->attr_pasid);
1108         kobject_del(p->kobj_queues);
1109         kobject_put(p->kobj_queues);
1110         p->kobj_queues = NULL;
1111
1112         for (i = 0; i < p->n_pdds; i++) {
1113                 pdd = p->pdds[i];
1114
1115                 sysfs_remove_file(p->kobj, &pdd->attr_vram);
1116                 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1117
1118                 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1119                 if (pdd->dev->kfd2kgd->get_cu_occupancy)
1120                         sysfs_remove_file(pdd->kobj_stats,
1121                                           &pdd->attr_cu_occupancy);
1122                 kobject_del(pdd->kobj_stats);
1123                 kobject_put(pdd->kobj_stats);
1124                 pdd->kobj_stats = NULL;
1125         }
1126
1127         for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1128                 pdd = p->pdds[i];
1129
1130                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1131                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1132                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1133                 kobject_del(pdd->kobj_counters);
1134                 kobject_put(pdd->kobj_counters);
1135                 pdd->kobj_counters = NULL;
1136         }
1137
1138         kobject_del(p->kobj);
1139         kobject_put(p->kobj);
1140         p->kobj = NULL;
1141 }
1142
1143 /* No process locking is needed in this function, because the process
1144  * is not findable any more. We must assume that no other thread is
1145  * using it any more, otherwise we couldn't safely free the process
1146  * structure in the end.
1147  */
1148 static void kfd_process_wq_release(struct work_struct *work)
1149 {
1150         struct kfd_process *p = container_of(work, struct kfd_process,
1151                                              release_work);
1152         struct dma_fence *ef;
1153
1154         kfd_process_dequeue_from_all_devices(p);
1155         pqm_uninit(&p->pqm);
1156
1157         /* Signal the eviction fence after user mode queues are
1158          * destroyed. This allows any BOs to be freed without
1159          * triggering pointless evictions or waiting for fences.
1160          */
1161         synchronize_rcu();
1162         ef = rcu_access_pointer(p->ef);
1163         if (ef)
1164                 dma_fence_signal(ef);
1165
1166         kfd_process_remove_sysfs(p);
1167
1168         kfd_process_kunmap_signal_bo(p);
1169         kfd_process_free_outstanding_kfd_bos(p);
1170         svm_range_list_fini(p);
1171
1172         kfd_process_destroy_pdds(p);
1173         dma_fence_put(ef);
1174
1175         kfd_event_free_process(p);
1176
1177         kfd_pasid_free(p->pasid);
1178         mutex_destroy(&p->mutex);
1179
1180         put_task_struct(p->lead_thread);
1181
1182         kfree(p);
1183 }
1184
1185 static void kfd_process_ref_release(struct kref *ref)
1186 {
1187         struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1188
1189         INIT_WORK(&p->release_work, kfd_process_wq_release);
1190         queue_work(kfd_process_wq, &p->release_work);
1191 }
1192
1193 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1194 {
1195         /* This increments p->ref counter if kfd process p exists */
1196         struct kfd_process *p = kfd_lookup_process_by_mm(mm);
1197
1198         return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1199 }
1200
1201 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1202 {
1203         kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1204 }
1205
1206 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1207 {
1208         int i;
1209
1210         cancel_delayed_work_sync(&p->eviction_work);
1211         cancel_delayed_work_sync(&p->restore_work);
1212
1213         for (i = 0; i < p->n_pdds; i++) {
1214                 struct kfd_process_device *pdd = p->pdds[i];
1215
1216                 /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1217                 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1218                         amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1219         }
1220
1221         /* Indicate to other users that MM is no longer valid */
1222         p->mm = NULL;
1223         kfd_dbg_trap_disable(p);
1224
1225         if (atomic_read(&p->debugged_process_count) > 0) {
1226                 struct kfd_process *target;
1227                 unsigned int temp;
1228                 int idx = srcu_read_lock(&kfd_processes_srcu);
1229
1230                 hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1231                         if (target->debugger_process && target->debugger_process == p) {
1232                                 mutex_lock_nested(&target->mutex, 1);
1233                                 kfd_dbg_trap_disable(target);
1234                                 mutex_unlock(&target->mutex);
1235                                 if (atomic_read(&p->debugged_process_count) == 0)
1236                                         break;
1237                         }
1238                 }
1239
1240                 srcu_read_unlock(&kfd_processes_srcu, idx);
1241         }
1242
1243         mmu_notifier_put(&p->mmu_notifier);
1244 }
1245
1246 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1247                                         struct mm_struct *mm)
1248 {
1249         struct kfd_process *p;
1250
1251         /*
1252          * The kfd_process structure cannot be freed because the
1253          * mmu_notifier SRCU is read-locked.
1254          */
1255         p = container_of(mn, struct kfd_process, mmu_notifier);
1256         if (WARN_ON(p->mm != mm))
1257                 return;
1258
1259         mutex_lock(&kfd_processes_mutex);
1260         /*
1261          * Return early if the table is empty.
1262          *
1263          * This could potentially happen if this function is called concurrently
1264          * by the mmu_notifier and by kfd_cleanup_processes().
1265          *
1266          */
1267         if (hash_empty(kfd_processes_table)) {
1268                 mutex_unlock(&kfd_processes_mutex);
1269                 return;
1270         }
1271         hash_del_rcu(&p->kfd_processes);
1272         mutex_unlock(&kfd_processes_mutex);
1273         synchronize_srcu(&kfd_processes_srcu);
1274
1275         kfd_process_notifier_release_internal(p);
1276 }
1277
1278 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1279         .release = kfd_process_notifier_release,
1280         .alloc_notifier = kfd_process_alloc_notifier,
1281         .free_notifier = kfd_process_free_notifier,
1282 };
1283
1284 /*
1285  * This code handles the case where the driver is being unloaded before all
1286  * mm_structs are released.  We need to safely free the kfd_process structures
1287  * and avoid race conditions with the mmu_notifier that might try to free them.
1288  *
1289  */
1290 void kfd_cleanup_processes(void)
1291 {
1292         struct kfd_process *p;
1293         struct hlist_node *p_temp;
1294         unsigned int temp;
1295         HLIST_HEAD(cleanup_list);
1296
1297         /*
1298          * Move all remaining kfd_processes from the process table to a
1299          * temp list for processing. Once done, the mmu_notifier release
1300          * callback will not see the kfd_process in the table and will return
1301          * early, avoiding double-free issues.
1302          */
1303         mutex_lock(&kfd_processes_mutex);
1304         hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1305                 hash_del_rcu(&p->kfd_processes);
1306                 synchronize_srcu(&kfd_processes_srcu);
1307                 hlist_add_head(&p->kfd_processes, &cleanup_list);
1308         }
1309         mutex_unlock(&kfd_processes_mutex);
1310
1311         hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1312                 kfd_process_notifier_release_internal(p);
1313
1314         /*
1315          * Ensure that all outstanding free_notifier callbacks get called,
1316          * triggering the release of the kfd_process structs.
1317          */
1318         mmu_notifier_synchronize();
1319 }
1320
1321 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1322 {
1323         unsigned long  offset;
1324         int i;
1325
1326         if (p->has_cwsr)
1327                 return 0;
1328
1329         for (i = 0; i < p->n_pdds; i++) {
1330                 struct kfd_node *dev = p->pdds[i]->dev;
1331                 struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1332
1333                 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1334                         continue;
1335
1336                 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1337                 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1338                         KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1339                         MAP_SHARED, offset);
1340
1341                 if (IS_ERR_VALUE(qpd->tba_addr)) {
1342                         int err = qpd->tba_addr;
1343
1344                         dev_err(dev->adev->dev,
1345                                 "Failure to set tba address. error %d.\n", err);
1346                         qpd->tba_addr = 0;
1347                         qpd->cwsr_kaddr = NULL;
1348                         return err;
1349                 }
1350
1351                 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1352
1353                 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1354
1355                 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1356                 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1357                         qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1358         }
1359
1360         p->has_cwsr = true;
1361
1362         return 0;
1363 }
1364
1365 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1366 {
1367         struct kfd_node *dev = pdd->dev;
1368         struct qcm_process_device *qpd = &pdd->qpd;
1369         uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1370                         | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1371                         | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1372         struct kgd_mem *mem;
1373         void *kaddr;
1374         int ret;
1375
1376         if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1377                 return 0;
1378
1379         /* cwsr_base is only set for dGPU */
1380         ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1381                                       KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1382         if (ret)
1383                 return ret;
1384
1385         qpd->cwsr_mem = mem;
1386         qpd->cwsr_kaddr = kaddr;
1387         qpd->tba_addr = qpd->cwsr_base;
1388
1389         memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1390
1391         kfd_process_set_trap_debug_flag(&pdd->qpd,
1392                                         pdd->process->debug_trap_enabled);
1393
1394         qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1395         pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1396                  qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1397
1398         return 0;
1399 }
1400
1401 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1402 {
1403         struct kfd_node *dev = pdd->dev;
1404         struct qcm_process_device *qpd = &pdd->qpd;
1405
1406         if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1407                 return;
1408
1409         kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1410 }
1411
1412 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1413                                   uint64_t tba_addr,
1414                                   uint64_t tma_addr)
1415 {
1416         if (qpd->cwsr_kaddr) {
1417                 /* KFD trap handler is bound, record as second-level TBA/TMA
1418                  * in first-level TMA. First-level trap will jump to second.
1419                  */
1420                 uint64_t *tma =
1421                         (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1422                 tma[0] = tba_addr;
1423                 tma[1] = tma_addr;
1424         } else {
1425                 /* No trap handler bound, bind as first-level TBA/TMA. */
1426                 qpd->tba_addr = tba_addr;
1427                 qpd->tma_addr = tma_addr;
1428         }
1429 }
1430
1431 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1432 {
1433         int i;
1434
1435         /* On most GFXv9 GPUs, the retry mode in the SQ must match the
1436          * boot time retry setting. Mixing processes with different
1437          * XNACK/retry settings can hang the GPU.
1438          *
1439          * Different GPUs can have different noretry settings depending
1440          * on HW bugs or limitations. We need to find at least one
1441          * XNACK mode for this process that's compatible with all GPUs.
1442          * Fortunately GPUs with retry enabled (noretry=0) can run code
1443          * built for XNACK-off. On GFXv9 it may perform slower.
1444          *
1445          * Therefore applications built for XNACK-off can always be
1446          * supported and will be our fallback if any GPU does not
1447          * support retry.
1448          */
1449         for (i = 0; i < p->n_pdds; i++) {
1450                 struct kfd_node *dev = p->pdds[i]->dev;
1451
1452                 /* Only consider GFXv9 and higher GPUs. Older GPUs don't
1453                  * support the SVM APIs and don't need to be considered
1454                  * for the XNACK mode selection.
1455                  */
1456                 if (!KFD_IS_SOC15(dev))
1457                         continue;
1458                 /* Aldebaran can always support XNACK because it can support
1459                  * per-process XNACK mode selection. But let the dev->noretry
1460                  * setting still influence the default XNACK mode.
1461                  */
1462                 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
1463                         if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
1464                                 pr_debug("SRIOV platform xnack not supported\n");
1465                                 return false;
1466                         }
1467                         continue;
1468                 }
1469
1470                 /* GFXv10 and later GPUs do not support shader preemption
1471                  * during page faults. This can lead to poor QoS for queue
1472                  * management and memory-manager-related preemptions or
1473                  * even deadlocks.
1474                  */
1475                 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1476                         return false;
1477
1478                 if (dev->kfd->noretry)
1479                         return false;
1480         }
1481
1482         return true;
1483 }
1484
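/* Record the debug trap enable flag in the third 64-bit word of the
 * first-level TMA, alongside the second-level TBA/TMA written by
 * kfd_process_set_trap_handler().
 */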
1485 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1486                                      bool enabled)
1487 {
1488         if (qpd->cwsr_kaddr) {
1489                 uint64_t *tma =
1490                         (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1491                 tma[2] = enabled;
1492         }
1493 }
1494
1495 /*
1496  * On return the kfd_process is fully operational and will be freed when the
1497  * mm is released
1498  */
1499 static struct kfd_process *create_process(const struct task_struct *thread)
1500 {
1501         struct kfd_process *process;
1502         struct mmu_notifier *mn;
1503         int err = -ENOMEM;
1504
1505         process = kzalloc(sizeof(*process), GFP_KERNEL);
1506         if (!process)
1507                 goto err_alloc_process;
1508
1509         kref_init(&process->ref);
1510         mutex_init(&process->mutex);
1511         process->mm = thread->mm;
1512         process->lead_thread = thread->group_leader;
1513         process->n_pdds = 0;
1514         process->queues_paused = false;
1515         INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1516         INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1517         process->last_restore_timestamp = get_jiffies_64();
1518         err = kfd_event_init_process(process);
1519         if (err)
1520                 goto err_event_init;
1521         process->is_32bit_user_mode = in_compat_syscall();
1522         process->debug_trap_enabled = false;
1523         process->debugger_process = NULL;
1524         process->exception_enable_mask = 0;
1525         atomic_set(&process->debugged_process_count, 0);
1526         sema_init(&process->runtime_enable_sema, 0);
1527
1528         process->pasid = kfd_pasid_alloc();
1529         if (process->pasid == 0) {
1530                 err = -ENOSPC;
1531                 goto err_alloc_pasid;
1532         }
1533
1534         err = pqm_init(&process->pqm, process);
1535         if (err != 0)
1536                 goto err_process_pqm_init;
1537
1538         /* init process apertures */
1539         err = kfd_init_apertures(process);
1540         if (err != 0)
1541                 goto err_init_apertures;
1542
1543         /* Check XNACK support after PDDs are created in kfd_init_apertures */
1544         process->xnack_enabled = kfd_process_xnack_mode(process, false);
1545
1546         err = svm_range_list_init(process);
1547         if (err)
1548                 goto err_init_svm_range_list;
1549
1550         /* alloc_notifier needs to find the process in the hash table */
1551         hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1552                         (uintptr_t)process->mm);
1553
1554         /* Prevent free_notifier from starting kfd_process_wq_release if
1555          * mmu_notifier_get fails because of a pending signal.
1556          */
1557         kref_get(&process->ref);
1558
1559         /* MMU notifier registration must be the last call that can fail
1560          * because after this point we cannot unwind the process creation.
1561          * After this point, mmu_notifier_put will trigger the cleanup by
1562          * dropping the last process reference in the free_notifier.
1563          */
1564         mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1565         if (IS_ERR(mn)) {
1566                 err = PTR_ERR(mn);
1567                 goto err_register_notifier;
1568         }
1569         BUG_ON(mn != &process->mmu_notifier);
1570
1571         kfd_unref_process(process);
1572         get_task_struct(process->lead_thread);
1573
1574         INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1575
1576         return process;
1577
1578 err_register_notifier:
1579         hash_del_rcu(&process->kfd_processes);
1580         svm_range_list_fini(process);
1581 err_init_svm_range_list:
1582         kfd_process_free_outstanding_kfd_bos(process);
1583         kfd_process_destroy_pdds(process);
1584 err_init_apertures:
1585         pqm_uninit(&process->pqm);
1586 err_process_pqm_init:
1587         kfd_pasid_free(process->pasid);
1588 err_alloc_pasid:
1589         kfd_event_free_process(process);
1590 err_event_init:
1591         mutex_destroy(&process->mutex);
1592         kfree(process);
1593 err_alloc_process:
1594         return ERR_PTR(err);
1595 }
1596
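/* Return the pdd that @p already has for @dev, or NULL if none exists. */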
1597 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1598                                                         struct kfd_process *p)
1599 {
1600         int i;
1601
1602         for (i = 0; i < p->n_pdds; i++)
1603                 if (p->pdds[i]->dev == dev)
1604                         return p->pdds[i];
1605
1606         return NULL;
1607 }
1608
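/* Allocate and initialize a new pdd for @dev and append it to p->pdds.
 * Returns NULL on allocation failure or if MAX_GPU_INSTANCE pdds already exist.
 */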
1609 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1610                                                         struct kfd_process *p)
1611 {
1612         struct kfd_process_device *pdd = NULL;
1613
1614         if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1615                 return NULL;
1616         pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1617         if (!pdd)
1618                 return NULL;
1619
1620         pdd->dev = dev;
1621         INIT_LIST_HEAD(&pdd->qpd.queues_list);
1622         INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1623         pdd->qpd.dqm = dev->dqm;
1624         pdd->qpd.pqm = &p->pqm;
1625         pdd->qpd.evicted = 0;
1626         pdd->qpd.mapped_gws_queue = false;
1627         pdd->process = p;
1628         pdd->bound = PDD_UNBOUND;
1629         pdd->already_dequeued = false;
1630         pdd->runtime_inuse = false;
1631         atomic64_set(&pdd->vram_usage, 0);
1632         pdd->sdma_past_activity_counter = 0;
1633         pdd->user_gpu_id = dev->id;
1634         atomic64_set(&pdd->evict_duration_counter, 0);
1635
1636         p->pdds[p->n_pdds++] = pdd;
1637         if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1638                 pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1639                                                         pdd->dev->adev,
1640                                                         false,
1641                                                         0);
1642
1643         /* Init idr used for memory handle translation */
1644         idr_init(&pdd->alloc_idr);
1645
1646         return pdd;
1647 }
1648
1649 /**
1650  * kfd_process_device_init_vm - Initialize a VM for a process-device
1651  *
1652  * @pdd: The process-device
1653  * @drm_file: Pointer to a DRM file descriptor (must not be NULL)
1654  *
1655  * If @drm_file is specified, it will be used to acquire the VM from
1656  * that file descriptor. If successful, the @pdd takes ownership of
1657  * the file descriptor.
1658  *
1659  * A valid @drm_file is required; if it is NULL, -EINVAL is returned.
1660  *
1661  * Returns 0 on success, -errno on failure.
1662  */
1663 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1664                                struct file *drm_file)
1665 {
1666         struct amdgpu_fpriv *drv_priv;
1667         struct amdgpu_vm *avm;
1668         struct kfd_process *p;
1669         struct dma_fence *ef;
1670         struct kfd_node *dev;
1671         int ret;
1672
1673         if (!drm_file)
1674                 return -EINVAL;
1675
1676         if (pdd->drm_priv)
1677                 return -EBUSY;
1678
1679         ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1680         if (ret)
1681                 return ret;
1682         avm = &drv_priv->vm;
1683
1684         p = pdd->process;
1685         dev = pdd->dev;
1686
1687         ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1688                                                      &p->kgd_process_info,
1689                                                      p->ef ? NULL : &ef);
1690         if (ret) {
1691                 dev_err(dev->adev->dev, "Failed to create process VM object\n");
1692                 return ret;
1693         }
1694
1695         if (!p->ef)
1696                 RCU_INIT_POINTER(p->ef, ef);
1697
1698         pdd->drm_priv = drm_file->private_data;
1699
1700         ret = kfd_process_device_reserve_ib_mem(pdd);
1701         if (ret)
1702                 goto err_reserve_ib_mem;
1703         ret = kfd_process_device_init_cwsr_dgpu(pdd);
1704         if (ret)
1705                 goto err_init_cwsr;
1706
1707         ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1708         if (ret)
1709                 goto err_set_pasid;
1710
1711         pdd->drm_file = drm_file;
1712
1713         return 0;
1714
1715 err_set_pasid:
1716         kfd_process_device_destroy_cwsr_dgpu(pdd);
1717 err_init_cwsr:
1718         kfd_process_device_destroy_ib_mem(pdd);
1719 err_reserve_ib_mem:
1720         pdd->drm_priv = NULL;
1721         amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1722
1723         return ret;
1724 }
1725
1726 /*
1727  * Return the process-device data for @dev, taking a runtime-PM
1728  * reference the first time so the device cannot runtime-suspend while
1729  * the pdd exists. The reference is dropped when the pdd is destroyed.
1730  *
1731  * Assumes that the process lock is held.
1732  */
1733 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1734                                                         struct kfd_process *p)
1735 {
1736         struct kfd_process_device *pdd;
1737         int err;
1738
1739         pdd = kfd_get_process_device_data(dev, p);
1740         if (!pdd) {
1741                 dev_err(dev->adev->dev, "Process device data doesn't exist\n");
1742                 return ERR_PTR(-ENOMEM);
1743         }
1744
1745         if (!pdd->drm_priv)
1746                 return ERR_PTR(-ENODEV);
1747
1748         /*
1749          * signal runtime-pm system to auto resume and prevent
1750          * further runtime suspend once device pdd is created until
1751          * pdd is destroyed.
1752          */
1753         if (!pdd->runtime_inuse) {
1754                 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1755                 if (err < 0) {
1756                         pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1757                         return ERR_PTR(err);
1758                 }
1759         }
1760
1761         /*
1762          * make sure that runtime_usage counter is incremented just once
1763          * per pdd
1764          */
1765         pdd->runtime_inuse = true;
1766
1767         return pdd;
1768 }
1769
1770 /* Create a handle in the process-local memory IDR that maps to @mem.
1771  * Assumes that the process lock is held.
1772  */
1773 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1774                                         void *mem)
1775 {
1776         return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1777 }
1778
1779 /* Translate a handle from the process-local memory IDR to its memory object.
1780  * Assumes that the process lock is held.
1781  */
1782 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1783                                         int handle)
1784 {
1785         if (handle < 0)
1786                 return NULL;
1787
1788         return idr_find(&pdd->alloc_idr, handle);
1789 }
1790
1791 /* Remove a handle from the process-local memory IDR.
1792  * Assumes that the process lock is held.
1793  */
1794 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1795                                         int handle)
1796 {
1797         if (handle >= 0)
1798                 idr_remove(&pdd->alloc_idr, handle);
1799 }
1800
1801 /* This increments the process->ref counter. */
1802 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1803 {
1804         struct kfd_process *p, *ret_p = NULL;
1805         unsigned int temp;
1806
1807         int idx = srcu_read_lock(&kfd_processes_srcu);
1808
1809         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1810                 if (p->pasid == pasid) {
1811                         kref_get(&p->ref);
1812                         ret_p = p;
1813                         break;
1814                 }
1815         }
1816
1817         srcu_read_unlock(&kfd_processes_srcu, idx);
1818
1819         return ret_p;
1820 }
1821
1822 /* This increments the process->ref counter. */
1823 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1824 {
1825         struct kfd_process *p;
1826
1827         int idx = srcu_read_lock(&kfd_processes_srcu);
1828
1829         p = find_process_by_mm(mm);
1830         if (p)
1831                 kref_get(&p->ref);
1832
1833         srcu_read_unlock(&kfd_processes_srcu, idx);
1834
1835         return p;
1836 }
1837
1838 /* kfd_process_evict_queues - Evict all user queues of a process
1839  *
1840  * Eviction is reference-counted per process-device. This means multiple
1841  * evictions from different sources can be nested safely.
1842  */
1843 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1844 {
1845         int r = 0;
1846         int i;
1847         unsigned int n_evicted = 0;
1848
1849         for (i = 0; i < p->n_pdds; i++) {
1850                 struct kfd_process_device *pdd = p->pdds[i];
1851                 struct device *dev = pdd->dev->adev->dev;
1852
1853                 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1854                                              trigger);
1855
1856                 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1857                                                             &pdd->qpd);
1858                 /* The evict call returns -EIO if HWS is hung or the ASIC is resetting.
1859                  * In that case we still mark all queues as evicted to prevent them from
1860                  * being added back, since their state has not actually been saved.
1861                  */
1862                 if (r && r != -EIO) {
1863                         dev_err(dev, "Failed to evict process queues\n");
1864                         goto fail;
1865                 }
1866                 n_evicted++;
1867
1868                 pdd->dev->dqm->is_hws_hang = false;
1869         }
1870
1871         return r;
1872
1873 fail:
1874         /* To keep state consistent, roll back partial eviction by
1875          * restoring queues
1876          */
1877         for (i = 0; i < p->n_pdds; i++) {
1878                 struct kfd_process_device *pdd = p->pdds[i];
1879
1880                 if (n_evicted == 0)
1881                         break;
1882
1883                 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1884
1885                 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1886                                                               &pdd->qpd))
1887                         dev_err(pdd->dev->adev->dev,
1888                                 "Failed to restore queues\n");
1889
1890                 n_evicted--;
1891         }
1892
1893         return r;
1894 }
1895
1896 /* kfd_process_restore_queues - Restore all user queues of a process */
1897 int kfd_process_restore_queues(struct kfd_process *p)
1898 {
1899         int r, ret = 0;
1900         int i;
1901
1902         for (i = 0; i < p->n_pdds; i++) {
1903                 struct kfd_process_device *pdd = p->pdds[i];
1904                 struct device *dev = pdd->dev->adev->dev;
1905
1906                 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1907
1908                 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1909                                                               &pdd->qpd);
1910                 if (r) {
1911                         dev_err(dev, "Failed to restore process queues\n");
1912                         if (!ret)
1913                                 ret = r;
1914                 }
1915         }
1916
1917         return ret;
1918 }
1919
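/* Return the index into p->pdds of the pdd whose user GPU ID matches
 * @gpu_id, or -EINVAL if there is none.
 */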
1920 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1921 {
1922         int i;
1923
1924         for (i = 0; i < p->n_pdds; i++)
1925                 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1926                         return i;
1927         return -EINVAL;
1928 }
1929
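/* Find the user GPU ID and pdds index of the pdd attached to @node;
 * returns -EINVAL if the process has no pdd for that node.
 */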
1930 int
1931 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1932                             uint32_t *gpuid, uint32_t *gpuidx)
1933 {
1934         int i;
1935
1936         for (i = 0; i < p->n_pdds; i++)
1937                 if (p->pdds[i] && p->pdds[i]->dev == node) {
1938                         *gpuid = p->pdds[i]->user_gpu_id;
1939                         *gpuidx = i;
1940                         return 0;
1941                 }
1942         return -EINVAL;
1943 }
1944
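/* Signal the process eviction fence p->ef if one is attached; returns
 * -EINVAL when no fence is set.
 */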
1945 static int signal_eviction_fence(struct kfd_process *p)
1946 {
1947         struct dma_fence *ef;
1948         int ret;
1949
1950         rcu_read_lock();
1951         ef = dma_fence_get_rcu_safe(&p->ef);
1952         rcu_read_unlock();
1953         if (!ef)
1954                 return -EINVAL;
1955
1956         ret = dma_fence_signal(ef);
1957         dma_fence_put(ef);
1958
1959         return ret;
1960 }
1961
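/* Delayed work that evicts all user queues of a process, signals its
 * eviction fence and schedules the delayed restore work.
 */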
1962 static void evict_process_worker(struct work_struct *work)
1963 {
1964         int ret;
1965         struct kfd_process *p;
1966         struct delayed_work *dwork;
1967
1968         dwork = to_delayed_work(work);
1969
1970         /* Process termination destroys this worker thread. So during the
1971          * lifetime of this thread, kfd_process p will be valid
1972          */
1973         p = container_of(dwork, struct kfd_process, eviction_work);
1974
1975         pr_debug("Started evicting pasid 0x%x\n", p->pasid);
1976         ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1977         if (!ret) {
1978                 /* If another thread already signaled the eviction fence,
1979                  * they are responsible for stopping the queues and scheduling
1980                  * the restore work.
1981                  */
1982                 if (signal_eviction_fence(p) ||
1983                     mod_delayed_work(kfd_restore_wq, &p->restore_work,
1984                                      msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
1985                         kfd_process_restore_queues(p);
1986
1987                 pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
1988         } else
1989                 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
1990 }
1991
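/* Restore the process BOs through KGD (if a VM has been acquired) and
 * then restore all user queues of the process.
 */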
1992 static int restore_process_helper(struct kfd_process *p)
1993 {
1994         int ret = 0;
1995
1996         /* VMs may not have been acquired yet during debugging. */
1997         if (p->kgd_process_info) {
1998                 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
1999                         p->kgd_process_info, &p->ef);
2000                 if (ret)
2001                         return ret;
2002         }
2003
2004         ret = kfd_process_restore_queues(p);
2005         if (!ret)
2006                 pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
2007         else
2008                 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
2009
2010         return ret;
2011 }
2012
2013 static void restore_process_worker(struct work_struct *work)
2014 {
2015         struct delayed_work *dwork;
2016         struct kfd_process *p;
2017         int ret = 0;
2018
2019         dwork = to_delayed_work(work);
2020
2021         /* Process termination destroys this worker thread. So during the
2022          * lifetime of this thread, kfd_process p will be valid
2023          */
2024         p = container_of(dwork, struct kfd_process, restore_work);
2025         pr_debug("Started restoring pasid 0x%x\n", p->pasid);
2026
2027         /* Set last_restore_timestamp before the restore has completed.
2028          * Otherwise it would have to be set by KGD (restore_process_bos)
2029          * before the KFD BOs are unreserved; if not, the process could be
2030          * evicted again before the timestamp is set.
2031          * If the restore fails, the timestamp is set again on the next
2032          * attempt. This means the minimum GPU quantum would be
2033          * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
2034          * functions).
2035          */
2036
2037         p->last_restore_timestamp = get_jiffies_64();
2038
2039         ret = restore_process_helper(p);
2040         if (ret) {
2041                 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
2042                          p->pasid, PROCESS_BACK_OFF_TIME_MS);
2043                 if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
2044                                      msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
2045                         kfd_process_restore_queues(p);
2046         }
2047 }
2048
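/* Evict the user queues of every known KFD process and signal their
 * eviction fences.
 */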
2049 void kfd_suspend_all_processes(void)
2050 {
2051         struct kfd_process *p;
2052         unsigned int temp;
2053         int idx = srcu_read_lock(&kfd_processes_srcu);
2054
2055         WARN(debug_evictions, "Evicting all processes");
2056         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2057                 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2058                         pr_err("Failed to suspend process 0x%x\n", p->pasid);
2059                 signal_eviction_fence(p);
2060         }
2061         srcu_read_unlock(&kfd_processes_srcu, idx);
2062 }
2063
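/* Restore BOs and user queues of every known KFD process; returns
 * -EFAULT if any restore fails.
 */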
2064 int kfd_resume_all_processes(void)
2065 {
2066         struct kfd_process *p;
2067         unsigned int temp;
2068         int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2069
2070         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2071                 if (restore_process_helper(p)) {
2072                         pr_err("Restore process %d failed during resume\n",
2073                                p->pasid);
2074                         ret = -EFAULT;
2075                 }
2076         }
2077         srcu_read_unlock(&kfd_processes_srcu, idx);
2078         return ret;
2079 }
2080
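/* Allocate the per-process CWSR buffer for @dev and map it into @vma.
 * The VMA must be exactly KFD_CWSR_TBA_TMA_SIZE bytes.
 */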
2081 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2082                           struct vm_area_struct *vma)
2083 {
2084         struct kfd_process_device *pdd;
2085         struct qcm_process_device *qpd;
2086
2087         if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2088                 dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
2089                 return -EINVAL;
2090         }
2091
2092         pdd = kfd_get_process_device_data(dev, process);
2093         if (!pdd)
2094                 return -EINVAL;
2095         qpd = &pdd->qpd;
2096
2097         qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2098                                         get_order(KFD_CWSR_TBA_TMA_SIZE));
2099         if (!qpd->cwsr_kaddr) {
2100                 dev_err(dev->adev->dev,
2101                         "Error allocating per process CWSR buffer.\n");
2102                 return -ENOMEM;
2103         }
2104
2105         vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2106                 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2107         /* Mapping pages to user process */
2108         return remap_pfn_range(vma, vma->vm_start,
2109                                PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2110                                KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2111 }
2112
2113 /* assumes caller holds process lock. */
2114 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2115 {
2116         uint32_t irq_drain_fence[8];
2117         uint8_t node_id = 0;
2118         int r = 0;
2119
2120         if (!KFD_IS_SOC15(pdd->dev))
2121                 return 0;
2122
2123         pdd->process->irq_drain_is_open = true;
2124
2125         memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2126         irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2127                                                         KFD_IRQ_FENCE_CLIENTID;
2128         irq_drain_fence[3] = pdd->process->pasid;
2129
2130         /*
2131          * For GFX 9.4.3/9.5.0, send the NodeId also in IH cookie DW[3]
2132          */
2133         if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
2134             KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4) ||
2135             KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 5, 0)) {
2136                 node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2137                 irq_drain_fence[3] |= node_id << 16;
2138         }
2139
2140         /* Ensure stale IRQs have been scheduled as KFD interrupts, then send the drain fence. */
2141         if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2142                                                      irq_drain_fence)) {
2143                 pdd->process->irq_drain_is_open = false;
2144                 return 0;
2145         }
2146
2147         r = wait_event_interruptible(pdd->process->wait_irq_drain,
2148                                      !READ_ONCE(pdd->process->irq_drain_is_open));
2149         if (r)
2150                 pdd->process->irq_drain_is_open = false;
2151
2152         return r;
2153 }
2154
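/* Mark the interrupt drain for @pasid as complete and wake up the
 * waiter in kfd_process_drain_interrupts().
 */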
2155 void kfd_process_close_interrupt_drain(unsigned int pasid)
2156 {
2157         struct kfd_process *p;
2158
2159         p = kfd_lookup_process_by_pasid(pasid);
2160
2161         if (!p)
2162                 return;
2163
2164         WRITE_ONCE(p->irq_drain_is_open, false);
2165         wake_up_all(&p->wait_irq_drain);
2166         kfd_unref_process(p);
2167 }
2168
2169 struct send_exception_work_handler_workarea {
2170         struct work_struct work;
2171         struct kfd_process *p;
2172         unsigned int queue_id;
2173         uint64_t error_reason;
2174 };
2175
2176 static void send_exception_work_handler(struct work_struct *work)
2177 {
2178         struct send_exception_work_handler_workarea *workarea;
2179         struct kfd_process *p;
2180         struct queue *q;
2181         struct mm_struct *mm;
2182         struct kfd_context_save_area_header __user *csa_header;
2183         uint64_t __user *err_payload_ptr;
2184         uint64_t cur_err;
2185         uint32_t ev_id;
2186
2187         workarea = container_of(work,
2188                                 struct send_exception_work_handler_workarea,
2189                                 work);
2190         p = workarea->p;
2191
2192         mm = get_task_mm(p->lead_thread);
2193
2194         if (!mm)
2195                 return;
2196
2197         kthread_use_mm(mm);
2198
2199         q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2200
2201         if (!q)
2202                 goto out;
2203
2204         csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
2205
2206         get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2207         get_user(cur_err, err_payload_ptr);
2208         cur_err |= workarea->error_reason;
2209         put_user(cur_err, err_payload_ptr);
2210         get_user(ev_id, &csa_header->err_event_id);
2211
2212         kfd_set_event(p, ev_id);
2213
2214 out:
2215         kthread_unuse_mm(mm);
2216         mmput(mm);
2217 }
2218
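/* Report @error_reason to the runtime for @queue_id by OR-ing it into the
 * error payload in the queue's context save area and signaling the
 * associated error event. The caller blocks until the work, which attaches
 * to the lead thread's mm, has completed.
 */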
2219 int kfd_send_exception_to_runtime(struct kfd_process *p,
2220                         unsigned int queue_id,
2221                         uint64_t error_reason)
2222 {
2223         struct send_exception_work_handler_workarea worker;
2224
2225         INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2226
2227         worker.p = p;
2228         worker.queue_id = queue_id;
2229         worker.error_reason = error_reason;
2230
2231         schedule_work(&worker.work);
2232         flush_work(&worker.work);
2233         destroy_work_on_stack(&worker.work);
2234
2235         return 0;
2236 }
2237
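/* Look up a process-device by user GPU ID; a @gpu_id of 0 never matches
 * and returns NULL.
 */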
2238 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2239 {
2240         int i;
2241
2242         if (gpu_id) {
2243                 for (i = 0; i < p->n_pdds; i++) {
2244                         struct kfd_process_device *pdd = p->pdds[i];
2245
2246                         if (pdd->user_gpu_id == gpu_id)
2247                                 return pdd;
2248                 }
2249         }
2250         return NULL;
2251 }
2252
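/* Translate an actual GPU device ID to the user GPU ID stored in the
 * matching pdd, or return -EINVAL if the process has no pdd for it.
 * An @actual_gpu_id of 0 returns 0.
 */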
2253 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2254 {
2255         int i;
2256
2257         if (!actual_gpu_id)
2258                 return 0;
2259
2260         for (i = 0; i < p->n_pdds; i++) {
2261                 struct kfd_process_device *pdd = p->pdds[i];
2262
2263                 if (pdd->dev->id == actual_gpu_id)
2264                         return pdd->user_gpu_id;
2265         }
2266         return -EINVAL;
2267 }
2268
2269 #if defined(CONFIG_DEBUG_FS)
2270
2271 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2272 {
2273         struct kfd_process *p;
2274         unsigned int temp;
2275         int r = 0;
2276
2277         int idx = srcu_read_lock(&kfd_processes_srcu);
2278
2279         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2280                 seq_printf(m, "Process %d PASID 0x%x:\n",
2281                            p->lead_thread->tgid, p->pasid);
2282
2283                 mutex_lock(&p->mutex);
2284                 r = pqm_debugfs_mqds(m, &p->pqm);
2285                 mutex_unlock(&p->mutex);
2286
2287                 if (r)
2288                         break;
2289         }
2290
2291         srcu_read_unlock(&kfd_processes_srcu, idx);
2292
2293         return r;
2294 }
2295
2296 #endif