1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23
24 #include <linux/mutex.h>
25 #include <linux/log2.h>
26 #include <linux/sched.h>
27 #include <linux/sched/mm.h>
28 #include <linux/sched/task.h>
29 #include <linux/mmu_context.h>
30 #include <linux/slab.h>
31 #include <linux/notifier.h>
32 #include <linux/compat.h>
33 #include <linux/mman.h>
34 #include <linux/file.h>
35 #include <linux/pm_runtime.h>
36 #include "amdgpu_amdkfd.h"
37 #include "amdgpu.h"
38
39 struct mm_struct;
40
41 #include "kfd_priv.h"
42 #include "kfd_device_queue_manager.h"
43 #include "kfd_svm.h"
44 #include "kfd_smi_events.h"
45 #include "kfd_debug.h"
46
47 /*
48  * Hash table of struct kfd_process (linked via the kfd_processes field).
49  * Unique/indexed by mm_struct*
50  */
51 DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
52 DEFINE_MUTEX(kfd_processes_mutex);
53
54 DEFINE_SRCU(kfd_processes_srcu);
55
56 /* For process termination handling */
57 static struct workqueue_struct *kfd_process_wq;
58
59 /* Ordered, single-threaded workqueue for restoring evicted
60  * processes. Restoring multiple processes concurrently under memory
61  * pressure can lead to processes blocking each other from validating
62  * their BOs and result in a live-lock situation where processes
63  * remain evicted indefinitely.
64  */
65 static struct workqueue_struct *kfd_restore_wq;
66
67 static struct kfd_process *find_process(const struct task_struct *thread,
68                                         bool ref);
69 static void kfd_process_ref_release(struct kref *ref);
70 static struct kfd_process *create_process(const struct task_struct *thread);
71
72 static void evict_process_worker(struct work_struct *work);
73 static void restore_process_worker(struct work_struct *work);
74
75 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
76
77 struct kfd_procfs_tree {
78         struct kobject *kobj;
79 };
80
81 static struct kfd_procfs_tree procfs;
82
83 /*
84  * Structure for SDMA activity tracking
85  */
86 struct kfd_sdma_activity_handler_workarea {
87         struct work_struct sdma_activity_work;
88         struct kfd_process_device *pdd;
89         uint64_t sdma_activity_counter;
90 };
91
92 struct temp_sdma_queue_list {
93         uint64_t __user *rptr;
94         uint64_t sdma_val;
95         unsigned int queue_id;
96         struct list_head list;
97 };
98
99 static void kfd_sdma_activity_worker(struct work_struct *work)
100 {
101         struct kfd_sdma_activity_handler_workarea *workarea;
102         struct kfd_process_device *pdd;
103         uint64_t val;
104         struct mm_struct *mm;
105         struct queue *q;
106         struct qcm_process_device *qpd;
107         struct device_queue_manager *dqm;
108         int ret = 0;
109         struct temp_sdma_queue_list sdma_q_list;
110         struct temp_sdma_queue_list *sdma_q, *next;
111
112         workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
113                                 sdma_activity_work);
114
115         pdd = workarea->pdd;
116         if (!pdd)
117                 return;
118         dqm = pdd->dev->dqm;
119         qpd = &pdd->qpd;
120         if (!dqm || !qpd)
121                 return;
122         /*
123          * Total SDMA activity is current SDMA activity + past SDMA activity.
124          * The past SDMA count is stored in the pdd.
125          * To get the current activity counters for all active SDMA queues,
126          * we loop over all SDMA queues and get their counts from user-space.
127          *
128          * We cannot call get_user() with dqm_lock held as it can cause
129          * a circular lock dependency. To read the SDMA stats,
130          * we do the following:
131          *
132          * 1. Create a temporary list of SDMA queue nodes from qpd->queues_list,
133          *    under dqm_lock()/dqm_unlock().
134          * 2. Call get_user() for each node in the temporary list without dqm_lock.
135          *    Save the SDMA count for each node and also add it to the total
136          *    SDMA count.
137          *    It's possible that, during this step, some SDMA queue nodes are
138          *    deleted from qpd->queues_list.
139          * 3. Do a second pass over qpd->queues_list to check whether any nodes
140          *    were deleted. A deleted node's SDMA count is already captured in
141          *    the past activity counter, so subtract the count saved in step 2
142          *    for that node from the total SDMA count.
143          */
144         INIT_LIST_HEAD(&sdma_q_list.list);
145
146         /*
147          * Create the temp list of all SDMA queues
148          */
149         dqm_lock(dqm);
150
151         list_for_each_entry(q, &qpd->queues_list, list) {
152                 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
153                     (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
154                         continue;
155
156                 sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
157                 if (!sdma_q) {
158                         dqm_unlock(dqm);
159                         goto cleanup;
160                 }
161
162                 INIT_LIST_HEAD(&sdma_q->list);
163                 sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
164                 sdma_q->queue_id = q->properties.queue_id;
165                 list_add_tail(&sdma_q->list, &sdma_q_list.list);
166         }
167
168         /*
169          * If the temp list is empty, then no SDMA queue nodes were found in
170          * qpd->queues_list. Return the past activity count as the total SDMA
171          * count.
172          */
173         if (list_empty(&sdma_q_list.list)) {
174                 workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
175                 dqm_unlock(dqm);
176                 return;
177         }
178
179         dqm_unlock(dqm);
180
181         /*
182          * Get the usage count for each SDMA queue in temp_list.
183          */
184         mm = get_task_mm(pdd->process->lead_thread);
185         if (!mm)
186                 goto cleanup;
187
188         kthread_use_mm(mm);
189
190         list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
191                 val = 0;
192                 ret = read_sdma_queue_counter(sdma_q->rptr, &val);
193                 if (ret) {
194                         pr_debug("Failed to read SDMA queue active counter for queue id: %d",
195                                  sdma_q->queue_id);
196                 } else {
197                         sdma_q->sdma_val = val;
198                         workarea->sdma_activity_counter += val;
199                 }
200         }
201
202         kthread_unuse_mm(mm);
203         mmput(mm);
204
205         /*
206          * Do a second iteration over qpd->queues_list to check whether any
207          * SDMA queue nodes were deleted while the SDMA counters were read.
208          */
209         dqm_lock(dqm);
210
211         workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;
212
213         list_for_each_entry(q, &qpd->queues_list, list) {
214                 if (list_empty(&sdma_q_list.list))
215                         break;
216
217                 if ((q->properties.type != KFD_QUEUE_TYPE_SDMA) &&
218                     (q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI))
219                         continue;
220
221                 list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
222                         if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
223                              (sdma_q->queue_id == q->properties.queue_id)) {
224                                 list_del(&sdma_q->list);
225                                 kfree(sdma_q);
226                                 break;
227                         }
228                 }
229         }
230
231         dqm_unlock(dqm);
232
233         /*
234          * If the temp list is not empty, some queues were deleted from
235          * qpd->queues_list while the SDMA usage was being read. Subtract the
236          * SDMA count of each such node from the total SDMA count.
237          */
238         list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
239                 workarea->sdma_activity_counter -= sdma_q->sdma_val;
240                 list_del(&sdma_q->list);
241                 kfree(sdma_q);
242         }
243
244         return;
245
246 cleanup:
247         list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
248                 list_del(&sdma_q->list);
249                 kfree(sdma_q);
250         }
251 }
252
253 /**
254  * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
255  * by the current process. Translates the acquired wave count into the number
256  * of compute units that are occupied.
257  *
258  * @attr: Handle of the attribute that allows reporting of the wave count. The
259  * attribute handle encapsulates the GPU device it is associated with, thereby
260  * allowing collection of waves in flight, etc.
261  * @buffer: Handle of the user-provided buffer updated with the wave count
262  *
263  * Return: Number of bytes written to the user buffer or an error value
264  */
265 static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
266 {
267         int cu_cnt;
268         int wave_cnt;
269         int max_waves_per_cu;
270         struct kfd_node *dev = NULL;
271         struct kfd_process *proc = NULL;
272         struct kfd_process_device *pdd = NULL;
273         int i;
274         struct kfd_cu_occupancy *cu_occupancy;
275         u32 queue_format;
276
277         pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
278         dev = pdd->dev;
279         if (dev->kfd2kgd->get_cu_occupancy == NULL)
280                 return -EINVAL;
281
282         cu_cnt = 0;
283         proc = pdd->process;
284         if (pdd->qpd.queue_count == 0) {
285                 pr_debug("Gpu-Id: %d has no active queues for process %d\n",
286                          dev->id, proc->pasid);
287                 return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
288         }
289
290         /* Collect the wave count from the device if it supports it */
291         wave_cnt = 0;
292         max_waves_per_cu = 0;
293
294         cu_occupancy = kcalloc(AMDGPU_MAX_QUEUES, sizeof(*cu_occupancy), GFP_KERNEL);
295         if (!cu_occupancy)
296                 return -ENOMEM;
297
298         /*
299          * For GFX 9.4.3, fetch the CU occupancy from the first XCC in the partition.
300          * For AQL queues, because of cooperative dispatch we multiply the wave count
301          * by the number of XCCs in the partition to get the total wave count across
302          * all XCCs in the partition.
303          * For PM4 queues, there is no cooperative dispatch, so wave_cnt stays as is.
304          */
305         dev->kfd2kgd->get_cu_occupancy(dev->adev, cu_occupancy,
306                         &max_waves_per_cu, ffs(dev->xcc_mask) - 1);
307
308         for (i = 0; i < AMDGPU_MAX_QUEUES; i++) {
309                 if (cu_occupancy[i].wave_cnt != 0 &&
310                     kfd_dqm_is_queue_in_process(dev->dqm, &pdd->qpd,
311                                                 cu_occupancy[i].doorbell_off,
312                                                 &queue_format)) {
313                         if (unlikely(queue_format == KFD_QUEUE_FORMAT_PM4))
314                                 wave_cnt += cu_occupancy[i].wave_cnt;
315                         else
316                                 wave_cnt += (NUM_XCC(dev->xcc_mask) *
317                                                 cu_occupancy[i].wave_cnt);
318                 }
319         }
320
321         /* Translate wave count to number of compute units */
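            /* Round up so that a partially occupied CU counts as occupied */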
322         cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
323         kfree(cu_occupancy);
324         return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
325 }
326
327 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
328                                char *buffer)
329 {
330         if (strcmp(attr->name, "pasid") == 0) {
331                 struct kfd_process *p = container_of(attr, struct kfd_process,
332                                                      attr_pasid);
333
334                 return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
335         } else if (strncmp(attr->name, "vram_", 5) == 0) {
336                 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
337                                                               attr_vram);
338                 return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
339         } else if (strncmp(attr->name, "sdma_", 5) == 0) {
340                 struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
341                                                               attr_sdma);
342                 struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
343
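                    /* Read the SDMA activity in a worker: the worker attaches to
                     * the lead thread's mm (kthread_use_mm) to read the user-space
                     * ring read pointers. The work item lives on this stack frame,
                     * so it is flushed and destroyed before this function returns.
                     */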
344                 INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
345                                   kfd_sdma_activity_worker);
346
347                 sdma_activity_work_handler.pdd = pdd;
348                 sdma_activity_work_handler.sdma_activity_counter = 0;
349
350                 schedule_work(&sdma_activity_work_handler.sdma_activity_work);
351
352                 flush_work(&sdma_activity_work_handler.sdma_activity_work);
353                 destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
354
355                 return snprintf(buffer, PAGE_SIZE, "%llu\n",
356                                 (sdma_activity_work_handler.sdma_activity_counter)/
357                                  SDMA_ACTIVITY_DIVISOR);
358         } else {
359                 pr_err("Invalid attribute");
360                 return -EINVAL;
361         }
362
363         return 0;
364 }
365
366 static void kfd_procfs_kobj_release(struct kobject *kobj)
367 {
368         kfree(kobj);
369 }
370
371 static const struct sysfs_ops kfd_procfs_ops = {
372         .show = kfd_procfs_show,
373 };
374
375 static const struct kobj_type procfs_type = {
376         .release = kfd_procfs_kobj_release,
377         .sysfs_ops = &kfd_procfs_ops,
378 };
379
380 void kfd_procfs_init(void)
381 {
382         int ret = 0;
383
384         procfs.kobj = kfd_alloc_struct(procfs.kobj);
385         if (!procfs.kobj)
386                 return;
387
388         ret = kobject_init_and_add(procfs.kobj, &procfs_type,
389                                    &kfd_device->kobj, "proc");
390         if (ret) {
391                 pr_warn("Could not create procfs proc folder");
392                 /* If we fail to create the procfs, clean up */
393                 kfd_procfs_shutdown();
394         }
395 }
396
397 void kfd_procfs_shutdown(void)
398 {
399         if (procfs.kobj) {
400                 kobject_del(procfs.kobj);
401                 kobject_put(procfs.kobj);
402                 procfs.kobj = NULL;
403         }
404 }
405
406 static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
407                                      struct attribute *attr, char *buffer)
408 {
409         struct queue *q = container_of(kobj, struct queue, kobj);
410
411         if (!strcmp(attr->name, "size"))
412                 return snprintf(buffer, PAGE_SIZE, "%llu",
413                                 q->properties.queue_size);
414         else if (!strcmp(attr->name, "type"))
415                 return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
416         else if (!strcmp(attr->name, "gpuid"))
417                 return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
418         else
419                 pr_err("Invalid attribute");
420
421         return 0;
422 }
423
424 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
425                                      struct attribute *attr, char *buffer)
426 {
427         if (strcmp(attr->name, "evicted_ms") == 0) {
428                 struct kfd_process_device *pdd = container_of(attr,
429                                 struct kfd_process_device,
430                                 attr_evict);
431                 uint64_t evict_jiffies;
432
433                 evict_jiffies = atomic64_read(&pdd->evict_duration_counter);
434
435                 return snprintf(buffer,
436                                 PAGE_SIZE,
437                                 "%llu\n",
438                                 jiffies64_to_msecs(evict_jiffies));
439
440         /* Sysfs handle that gets CU occupancy is per device */
441         } else if (strcmp(attr->name, "cu_occupancy") == 0) {
442                 return kfd_get_cu_occupancy(attr, buffer);
443         } else {
444                 pr_err("Invalid attribute");
445         }
446
447         return 0;
448 }
449
450 static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
451                                        struct attribute *attr, char *buf)
452 {
453         struct kfd_process_device *pdd;
454
455         if (!strcmp(attr->name, "faults")) {
456                 pdd = container_of(attr, struct kfd_process_device,
457                                    attr_faults);
458                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
459         }
460         if (!strcmp(attr->name, "page_in")) {
461                 pdd = container_of(attr, struct kfd_process_device,
462                                    attr_page_in);
463                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
464         }
465         if (!strcmp(attr->name, "page_out")) {
466                 pdd = container_of(attr, struct kfd_process_device,
467                                    attr_page_out);
468                 return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
469         }
470         return 0;
471 }
472
473 static struct attribute attr_queue_size = {
474         .name = "size",
475         .mode = KFD_SYSFS_FILE_MODE
476 };
477
478 static struct attribute attr_queue_type = {
479         .name = "type",
480         .mode = KFD_SYSFS_FILE_MODE
481 };
482
483 static struct attribute attr_queue_gpuid = {
484         .name = "gpuid",
485         .mode = KFD_SYSFS_FILE_MODE
486 };
487
488 static struct attribute *procfs_queue_attrs[] = {
489         &attr_queue_size,
490         &attr_queue_type,
491         &attr_queue_gpuid,
492         NULL
493 };
494 ATTRIBUTE_GROUPS(procfs_queue);
495
496 static const struct sysfs_ops procfs_queue_ops = {
497         .show = kfd_procfs_queue_show,
498 };
499
500 static const struct kobj_type procfs_queue_type = {
501         .sysfs_ops = &procfs_queue_ops,
502         .default_groups = procfs_queue_groups,
503 };
504
505 static const struct sysfs_ops procfs_stats_ops = {
506         .show = kfd_procfs_stats_show,
507 };
508
509 static const struct kobj_type procfs_stats_type = {
510         .sysfs_ops = &procfs_stats_ops,
511         .release = kfd_procfs_kobj_release,
512 };
513
514 static const struct sysfs_ops sysfs_counters_ops = {
515         .show = kfd_sysfs_counters_show,
516 };
517
518 static const struct kobj_type sysfs_counters_type = {
519         .sysfs_ops = &sysfs_counters_ops,
520         .release = kfd_procfs_kobj_release,
521 };
522
523 int kfd_procfs_add_queue(struct queue *q)
524 {
525         struct kfd_process *proc;
526         int ret;
527
528         if (!q || !q->process)
529                 return -EINVAL;
530         proc = q->process;
531
532         /* Create proc/<pid>/queues/<queue id> folder */
533         if (!proc->kobj_queues)
534                 return -EFAULT;
535         ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
536                         proc->kobj_queues, "%u", q->properties.queue_id);
537         if (ret < 0) {
538                 pr_warn("Creating proc/<pid>/queues/%u failed",
539                         q->properties.queue_id);
540                 kobject_put(&q->kobj);
541                 return ret;
542         }
543
544         return 0;
545 }
546
547 static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
548                                  char *name)
549 {
550         int ret;
551
552         if (!kobj || !attr || !name)
553                 return;
554
555         attr->name = name;
556         attr->mode = KFD_SYSFS_FILE_MODE;
557         sysfs_attr_init(attr);
558
559         ret = sysfs_create_file(kobj, attr);
560         if (ret)
561                 pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
562 }
563
564 static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
565 {
566         int ret;
567         int i;
568         char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
569
570         if (!p || !p->kobj)
571                 return;
572
573         /*
574          * Create sysfs files for each GPU:
575          * - proc/<pid>/stats_<gpuid>/
576          * - proc/<pid>/stats_<gpuid>/evicted_ms
577          * - proc/<pid>/stats_<gpuid>/cu_occupancy
578          */
579         for (i = 0; i < p->n_pdds; i++) {
580                 struct kfd_process_device *pdd = p->pdds[i];
581
582                 snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
583                                 "stats_%u", pdd->dev->id);
584                 pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
585                 if (!pdd->kobj_stats)
586                         return;
587
588                 ret = kobject_init_and_add(pdd->kobj_stats,
589                                            &procfs_stats_type,
590                                            p->kobj,
591                                            stats_dir_filename);
592
593                 if (ret) {
594                         pr_warn("Creating KFD proc/stats_%s folder failed",
595                                 stats_dir_filename);
596                         kobject_put(pdd->kobj_stats);
597                         pdd->kobj_stats = NULL;
598                         return;
599                 }
600
601                 kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
602                                       "evicted_ms");
603                 /* Add sysfs file to report compute unit occupancy */
604                 if (pdd->dev->kfd2kgd->get_cu_occupancy)
605                         kfd_sysfs_create_file(pdd->kobj_stats,
606                                               &pdd->attr_cu_occupancy,
607                                               "cu_occupancy");
608         }
609 }
610
611 static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
612 {
613         int ret = 0;
614         int i;
615         char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
616
617         if (!p || !p->kobj)
618                 return;
619
620         /*
621          * Create sysfs files for each GPU which supports SVM
622          * - proc/<pid>/counters_<gpuid>/
623          * - proc/<pid>/counters_<gpuid>/faults
624          * - proc/<pid>/counters_<gpuid>/page_in
625          * - proc/<pid>/counters_<gpuid>/page_out
626          */
627         for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
628                 struct kfd_process_device *pdd = p->pdds[i];
629                 struct kobject *kobj_counters;
630
631                 snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
632                         "counters_%u", pdd->dev->id);
633                 kobj_counters = kfd_alloc_struct(kobj_counters);
634                 if (!kobj_counters)
635                         return;
636
637                 ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
638                                            p->kobj, counters_dir_filename);
639                 if (ret) {
640                         pr_warn("Creating KFD proc/%s folder failed",
641                                 counters_dir_filename);
642                         kobject_put(kobj_counters);
643                         return;
644                 }
645
646                 pdd->kobj_counters = kobj_counters;
647                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
648                                       "faults");
649                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
650                                       "page_in");
651                 kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
652                                       "page_out");
653         }
654 }
655
656 static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
657 {
658         int i;
659
660         if (!p || !p->kobj)
661                 return;
662
663         /*
664          * Create sysfs files for each GPU:
665          * - proc/<pid>/vram_<gpuid>
666          * - proc/<pid>/sdma_<gpuid>
667          */
668         for (i = 0; i < p->n_pdds; i++) {
669                 struct kfd_process_device *pdd = p->pdds[i];
670
671                 snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
672                          pdd->dev->id);
673                 kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
674                                       pdd->vram_filename);
675
676                 snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
677                          pdd->dev->id);
678                 kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
679                                             pdd->sdma_filename);
680         }
681 }
682
683 void kfd_procfs_del_queue(struct queue *q)
684 {
685         if (!q)
686                 return;
687
688         kobject_del(&q->kobj);
689         kobject_put(&q->kobj);
690 }
691
692 int kfd_process_create_wq(void)
693 {
694         if (!kfd_process_wq)
695                 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
696         if (!kfd_restore_wq)
697                 kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq",
698                                                          WQ_FREEZABLE);
699
700         if (!kfd_process_wq || !kfd_restore_wq) {
701                 kfd_process_destroy_wq();
702                 return -ENOMEM;
703         }
704
705         return 0;
706 }
707
708 void kfd_process_destroy_wq(void)
709 {
710         if (kfd_process_wq) {
711                 destroy_workqueue(kfd_process_wq);
712                 kfd_process_wq = NULL;
713         }
714         if (kfd_restore_wq) {
715                 destroy_workqueue(kfd_restore_wq);
716                 kfd_restore_wq = NULL;
717         }
718 }
719
720 static void kfd_process_free_gpuvm(struct kgd_mem *mem,
721                         struct kfd_process_device *pdd, void **kptr)
722 {
723         struct kfd_node *dev = pdd->dev;
724
725         if (kptr && *kptr) {
726                 amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
727                 *kptr = NULL;
728         }
729
730         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv);
731         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv,
732                                                NULL);
733 }
734
735 /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
736  *      This function should only be called right after the process
737  *      is created, while kfd_processes_mutex is still held, to avoid
738  *      concurrency. Because of that exclusiveness, we do not need to
739  *      take p->mutex.
740  */
741 static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
742                                    uint64_t gpu_va, uint32_t size,
743                                    uint32_t flags, struct kgd_mem **mem, void **kptr)
744 {
745         struct kfd_node *kdev = pdd->dev;
746         int err;
747
748         err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size,
749                                                  pdd->drm_priv, mem, NULL,
750                                                  flags, false);
751         if (err)
752                 goto err_alloc_mem;
753
754         err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem,
755                         pdd->drm_priv);
756         if (err)
757                 goto err_map_mem;
758
759         err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true);
760         if (err) {
761                 pr_debug("Sync memory failed, wait interrupted by user signal\n");
762                 goto sync_memory_failed;
763         }
764
765         if (kptr) {
766                 err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(
767                                 (struct kgd_mem *)*mem, kptr, NULL);
768                 if (err) {
769                         pr_debug("Map GTT BO to kernel failed\n");
770                         goto sync_memory_failed;
771                 }
772         }
773
774         return err;
775
776 sync_memory_failed:
777         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv);
778
779 err_map_mem:
780         amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv,
781                                                NULL);
782 err_alloc_mem:
783         *mem = NULL;
784         *kptr = NULL;
785         return err;
786 }
787
788 /* kfd_process_device_reserve_ib_mem - Reserve memory inside the
789  *      process for IB usage. The reserved memory is used by KFD to
790  *      submit IBs to AMDGPU from the kernel. If the memory is reserved
791  *      successfully, ib_kaddr will hold the CPU/kernel address. Check
792  *      ib_kaddr before accessing the memory.
793  */
794 static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
795 {
796         struct qcm_process_device *qpd = &pdd->qpd;
797         uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
798                         KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
799                         KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
800                         KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
801         struct kgd_mem *mem;
802         void *kaddr;
803         int ret;
804
805         if (qpd->ib_kaddr || !qpd->ib_base)
806                 return 0;
807
808         /* ib_base is only set for dGPU */
809         ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
810                                       &mem, &kaddr);
811         if (ret)
812                 return ret;
813
814         qpd->ib_mem = mem;
815         qpd->ib_kaddr = kaddr;
816
817         return 0;
818 }
819
820 static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
821 {
822         struct qcm_process_device *qpd = &pdd->qpd;
823
824         if (!qpd->ib_kaddr || !qpd->ib_base)
825                 return;
826
827         kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr);
828 }
829
830 struct kfd_process *kfd_create_process(struct task_struct *thread)
831 {
832         struct kfd_process *process;
833         int ret;
834
835         if (!(thread->mm && mmget_not_zero(thread->mm)))
836                 return ERR_PTR(-EINVAL);
837
838         /* Only the pthreads threading model is supported. */
839         if (thread->group_leader->mm != thread->mm) {
840                 mmput(thread->mm);
841                 return ERR_PTR(-EINVAL);
842         }
843
844         /*
845          * Take kfd_processes_mutex before starting process creation so
846          * that two threads of the same process cannot create two
847          * kfd_process structures.
848          */
849         mutex_lock(&kfd_processes_mutex);
850
851         if (kfd_is_locked()) {
852                 pr_debug("KFD is locked! Cannot create process");
853                 process = ERR_PTR(-EINVAL);
854                 goto out;
855         }
856
857         /* A prior open of /dev/kfd could have already created the process.
858          * find_process will increase process kref in this case
859          */
860         process = find_process(thread, true);
861         if (process) {
862                 pr_debug("Process already found\n");
863         } else {
864                 /* If the process just called exec(3), it is possible that the
865                  * cleanup of the kfd_process (following the release of the mm
866                  * of the old process image) is still in the cleanup work queue.
867                  * Make sure to drain any pending work before trying to
868                  * recreate any resources for this process.
869                  */
870                 flush_workqueue(kfd_process_wq);
871
872                 process = create_process(thread);
873                 if (IS_ERR(process))
874                         goto out;
875
876                 if (!procfs.kobj)
877                         goto out;
878
879                 process->kobj = kfd_alloc_struct(process->kobj);
880                 if (!process->kobj) {
881                         pr_warn("Creating procfs kobject failed");
882                         goto out;
883                 }
884                 ret = kobject_init_and_add(process->kobj, &procfs_type,
885                                            procfs.kobj, "%d",
886                                            (int)process->lead_thread->pid);
887                 if (ret) {
888                         pr_warn("Creating procfs pid directory failed");
889                         kobject_put(process->kobj);
890                         goto out;
891                 }
892
893                 kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
894                                       "pasid");
895
896                 process->kobj_queues = kobject_create_and_add("queues",
897                                                         process->kobj);
898                 if (!process->kobj_queues)
899                         pr_warn("Creating KFD proc/queues folder failed");
900
901                 kfd_procfs_add_sysfs_stats(process);
902                 kfd_procfs_add_sysfs_files(process);
903                 kfd_procfs_add_sysfs_counters(process);
904
905                 init_waitqueue_head(&process->wait_irq_drain);
906         }
907 out:
908         mutex_unlock(&kfd_processes_mutex);
909         mmput(thread->mm);
910
911         return process;
912 }
913
914 struct kfd_process *kfd_get_process(const struct task_struct *thread)
915 {
916         struct kfd_process *process;
917
918         if (!thread->mm)
919                 return ERR_PTR(-EINVAL);
920
921         /* Only the pthreads threading model is supported. */
922         if (thread->group_leader->mm != thread->mm)
923                 return ERR_PTR(-EINVAL);
924
925         process = find_process(thread, false);
926         if (!process)
927                 return ERR_PTR(-EINVAL);
928
929         return process;
930 }
931
932 static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
933 {
934         struct kfd_process *process;
935
936         hash_for_each_possible_rcu(kfd_processes_table, process,
937                                         kfd_processes, (uintptr_t)mm)
938                 if (process->mm == mm)
939                         return process;
940
941         return NULL;
942 }
943
944 static struct kfd_process *find_process(const struct task_struct *thread,
945                                         bool ref)
946 {
947         struct kfd_process *p;
948         int idx;
949
950         idx = srcu_read_lock(&kfd_processes_srcu);
951         p = find_process_by_mm(thread->mm);
952         if (p && ref)
953                 kref_get(&p->ref);
954         srcu_read_unlock(&kfd_processes_srcu, idx);
955
956         return p;
957 }
958
959 void kfd_unref_process(struct kfd_process *p)
960 {
961         kref_put(&p->ref, kfd_process_ref_release);
962 }
963
964 /* This increments the process->ref counter. */
965 struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid)
966 {
967         struct task_struct *task = NULL;
968         struct kfd_process *p    = NULL;
969
970         if (!pid) {
971                 task = current;
972                 get_task_struct(task);
973         } else {
974                 task = get_pid_task(pid, PIDTYPE_PID);
975         }
976
977         if (task) {
978                 p = find_process(task, true);
979                 put_task_struct(task);
980         }
981
982         return p;
983 }
984
985 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
986 {
987         struct kfd_process *p = pdd->process;
988         void *mem;
989         int id;
990         int i;
991
992         /*
993          * Remove all handles from idr and release appropriate
994          * local memory object
995          */
996         idr_for_each_entry(&pdd->alloc_idr, mem, id) {
997
998                 for (i = 0; i < p->n_pdds; i++) {
999                         struct kfd_process_device *peer_pdd = p->pdds[i];
1000
1001                         if (!peer_pdd->drm_priv)
1002                                 continue;
1003                         amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
1004                                 peer_pdd->dev->adev, mem, peer_pdd->drm_priv);
1005                 }
1006
1007                 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem,
1008                                                        pdd->drm_priv, NULL);
1009                 kfd_process_device_remove_obj_handle(pdd, id);
1010         }
1011 }
1012
1013 /*
1014  * Just kunmap and unpin signal BO here. It will be freed in
1015  * kfd_process_free_outstanding_kfd_bos()
1016  */
1017 static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
1018 {
1019         struct kfd_process_device *pdd;
1020         struct kfd_node *kdev;
1021         void *mem;
1022
1023         kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
1024         if (!kdev)
1025                 return;
1026
1027         mutex_lock(&p->mutex);
1028
1029         pdd = kfd_get_process_device_data(kdev, p);
1030         if (!pdd)
1031                 goto out;
1032
1033         mem = kfd_process_device_translate_handle(
1034                 pdd, GET_IDR_HANDLE(p->signal_handle));
1035         if (!mem)
1036                 goto out;
1037
1038         amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(mem);
1039
1040 out:
1041         mutex_unlock(&p->mutex);
1042 }
1043
1044 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
1045 {
1046         int i;
1047
1048         for (i = 0; i < p->n_pdds; i++)
1049                 kfd_process_device_free_bos(p->pdds[i]);
1050 }
1051
1052 static void kfd_process_destroy_pdds(struct kfd_process *p)
1053 {
1054         int i;
1055
1056         for (i = 0; i < p->n_pdds; i++) {
1057                 struct kfd_process_device *pdd = p->pdds[i];
1058
1059                 pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
1060                                 pdd->dev->id, p->pasid);
1061
1062                 kfd_process_device_destroy_cwsr_dgpu(pdd);
1063                 kfd_process_device_destroy_ib_mem(pdd);
1064
1065                 if (pdd->drm_file) {
1066                         amdgpu_amdkfd_gpuvm_release_process_vm(
1067                                         pdd->dev->adev, pdd->drm_priv);
1068                         fput(pdd->drm_file);
1069                 }
1070
1071                 if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
1072                         free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
1073                                 get_order(KFD_CWSR_TBA_TMA_SIZE));
1074
1075                 idr_destroy(&pdd->alloc_idr);
1076
1077                 kfd_free_process_doorbells(pdd->dev->kfd, pdd);
1078
1079                 if (pdd->dev->kfd->shared_resources.enable_mes &&
1080                         pdd->proc_ctx_cpu_ptr)
1081                         amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev,
1082                                                    &pdd->proc_ctx_bo);
1083                 /*
1084                  * before destroying pdd, make sure to report availability
1085                  * for auto suspend
1086                  */
1087                 if (pdd->runtime_inuse) {
1088                         pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
1089                         pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
1090                         pdd->runtime_inuse = false;
1091                 }
1092
1093                 kfree(pdd);
1094                 p->pdds[i] = NULL;
1095         }
1096         p->n_pdds = 0;
1097 }
1098
1099 static void kfd_process_remove_sysfs(struct kfd_process *p)
1100 {
1101         struct kfd_process_device *pdd;
1102         int i;
1103
1104         if (!p->kobj)
1105                 return;
1106
1107         sysfs_remove_file(p->kobj, &p->attr_pasid);
1108         kobject_del(p->kobj_queues);
1109         kobject_put(p->kobj_queues);
1110         p->kobj_queues = NULL;
1111
1112         for (i = 0; i < p->n_pdds; i++) {
1113                 pdd = p->pdds[i];
1114
1115                 sysfs_remove_file(p->kobj, &pdd->attr_vram);
1116                 sysfs_remove_file(p->kobj, &pdd->attr_sdma);
1117
1118                 sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
1119                 if (pdd->dev->kfd2kgd->get_cu_occupancy)
1120                         sysfs_remove_file(pdd->kobj_stats,
1121                                           &pdd->attr_cu_occupancy);
1122                 kobject_del(pdd->kobj_stats);
1123                 kobject_put(pdd->kobj_stats);
1124                 pdd->kobj_stats = NULL;
1125         }
1126
1127         for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
1128                 pdd = p->pdds[i];
1129
1130                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
1131                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
1132                 sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
1133                 kobject_del(pdd->kobj_counters);
1134                 kobject_put(pdd->kobj_counters);
1135                 pdd->kobj_counters = NULL;
1136         }
1137
1138         kobject_del(p->kobj);
1139         kobject_put(p->kobj);
1140         p->kobj = NULL;
1141 }
1142
1143 /* No process locking is needed in this function, because the process
1144  * is not findable any more. We must assume that no other thread is
1145  * using it any more, otherwise we couldn't safely free the process
1146  * structure in the end.
1147  */
1148 static void kfd_process_wq_release(struct work_struct *work)
1149 {
1150         struct kfd_process *p = container_of(work, struct kfd_process,
1151                                              release_work);
1152         struct dma_fence *ef;
1153
1154         kfd_process_dequeue_from_all_devices(p);
1155         pqm_uninit(&p->pqm);
1156
1157         /* Signal the eviction fence after user mode queues are
1158          * destroyed. This allows any BOs to be freed without
1159          * triggering pointless evictions or waiting for fences.
1160          */
1161         synchronize_rcu();
1162         ef = rcu_access_pointer(p->ef);
1163         dma_fence_signal(ef);
1164
1165         kfd_process_remove_sysfs(p);
1166
1167         kfd_process_kunmap_signal_bo(p);
1168         kfd_process_free_outstanding_kfd_bos(p);
1169         svm_range_list_fini(p);
1170
1171         kfd_process_destroy_pdds(p);
1172         dma_fence_put(ef);
1173
1174         kfd_event_free_process(p);
1175
1176         kfd_pasid_free(p->pasid);
1177         mutex_destroy(&p->mutex);
1178
1179         put_task_struct(p->lead_thread);
1180
1181         kfree(p);
1182 }
1183
1184 static void kfd_process_ref_release(struct kref *ref)
1185 {
1186         struct kfd_process *p = container_of(ref, struct kfd_process, ref);
1187
1188         INIT_WORK(&p->release_work, kfd_process_wq_release);
1189         queue_work(kfd_process_wq, &p->release_work);
1190 }
1191
1192 static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
1193 {
1194         /* This increments p->ref counter if kfd process p exists */
1195         struct kfd_process *p = kfd_lookup_process_by_mm(mm);
1196
1197         return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
1198 }
1199
1200 static void kfd_process_free_notifier(struct mmu_notifier *mn)
1201 {
1202         kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
1203 }
1204
1205 static void kfd_process_notifier_release_internal(struct kfd_process *p)
1206 {
1207         int i;
1208
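             /* Make sure the eviction and restore workers are no longer running
              * or queued for this process before it is torn down.
              */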
1209         cancel_delayed_work_sync(&p->eviction_work);
1210         cancel_delayed_work_sync(&p->restore_work);
1211
1212         for (i = 0; i < p->n_pdds; i++) {
1213                 struct kfd_process_device *pdd = p->pdds[i];
1214
1215                 /* re-enable GFX OFF since runtime enable with ttmp setup disabled it. */
1216                 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev) && p->runtime_info.ttmp_setup)
1217                         amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
1218         }
1219
1220         /* Indicate to other users that MM is no longer valid */
1221         p->mm = NULL;
1222         kfd_dbg_trap_disable(p);
1223
1224         if (atomic_read(&p->debugged_process_count) > 0) {
1225                 struct kfd_process *target;
1226                 unsigned int temp;
1227                 int idx = srcu_read_lock(&kfd_processes_srcu);
1228
1229                 hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) {
1230                         if (target->debugger_process && target->debugger_process == p) {
1231                                 mutex_lock_nested(&target->mutex, 1);
1232                                 kfd_dbg_trap_disable(target);
1233                                 mutex_unlock(&target->mutex);
1234                                 if (atomic_read(&p->debugged_process_count) == 0)
1235                                         break;
1236                         }
1237                 }
1238
1239                 srcu_read_unlock(&kfd_processes_srcu, idx);
1240         }
1241
1242         mmu_notifier_put(&p->mmu_notifier);
1243 }
1244
1245 static void kfd_process_notifier_release(struct mmu_notifier *mn,
1246                                         struct mm_struct *mm)
1247 {
1248         struct kfd_process *p;
1249
1250         /*
1251          * The kfd_process structure cannot be freed because the
1252          * mmu_notifier SRCU is read-locked.
1253          */
1254         p = container_of(mn, struct kfd_process, mmu_notifier);
1255         if (WARN_ON(p->mm != mm))
1256                 return;
1257
1258         mutex_lock(&kfd_processes_mutex);
1259         /*
1260          * Do an early return if the table is empty.
1261          *
1262          * This could potentially happen if this function is called
1263          * concurrently by the mmu_notifier and by
1264          * kfd_cleanup_processes().
1265          */
1266         if (hash_empty(kfd_processes_table)) {
1267                 mutex_unlock(&kfd_processes_mutex);
1268                 return;
1269         }
1270         hash_del_rcu(&p->kfd_processes);
1271         mutex_unlock(&kfd_processes_mutex);
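             /* Wait for any concurrent SRCU readers of the process table to
              * finish before tearing this process down.
              */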
1272         synchronize_srcu(&kfd_processes_srcu);
1273
1274         kfd_process_notifier_release_internal(p);
1275 }
1276
1277 static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
1278         .release = kfd_process_notifier_release,
1279         .alloc_notifier = kfd_process_alloc_notifier,
1280         .free_notifier = kfd_process_free_notifier,
1281 };
1282
1283 /*
1284  * This code handles the case where the driver is being unloaded before
1285  * all mm_structs are released. We need to safely free the kfd_process
1286  * structures and avoid race conditions with the mmu_notifier, which
1287  * might try to free them.
1288  */
1289 void kfd_cleanup_processes(void)
1290 {
1291         struct kfd_process *p;
1292         struct hlist_node *p_temp;
1293         unsigned int temp;
1294         HLIST_HEAD(cleanup_list);
1295
1296         /*
1297          * Move all remaining kfd_process entries from the process table to a
1298          * temp list for processing. Once done, the mmu_notifier release
1299          * callback will not see the kfd_process in the table and will return
1300          * early, avoiding double-free issues.
1301          */
1302         mutex_lock(&kfd_processes_mutex);
1303         hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
1304                 hash_del_rcu(&p->kfd_processes);
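                     /* The same hlist node is re-linked onto the cleanup list
                      * below, so wait for SRCU readers to stop looking at it
                      * first.
                      */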
1305                 synchronize_srcu(&kfd_processes_srcu);
1306                 hlist_add_head(&p->kfd_processes, &cleanup_list);
1307         }
1308         mutex_unlock(&kfd_processes_mutex);
1309
1310         hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes)
1311                 kfd_process_notifier_release_internal(p);
1312
1313         /*
1314          * Ensure that all outstanding free_notifier callbacks get called,
1315          * triggering the release of the kfd_process structs.
1316          */
1317         mmu_notifier_synchronize();
1318 }
1319
1320 int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
1321 {
1322         unsigned long  offset;
1323         int i;
1324
1325         if (p->has_cwsr)
1326                 return 0;
1327
1328         for (i = 0; i < p->n_pdds; i++) {
1329                 struct kfd_node *dev = p->pdds[i]->dev;
1330                 struct qcm_process_device *qpd = &p->pdds[i]->qpd;
1331
1332                 if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
1333                         continue;
1334
1335                 offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
1336                 qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
1337                         KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
1338                         MAP_SHARED, offset);
1339
1340                 if (IS_ERR_VALUE(qpd->tba_addr)) {
1341                         int err = qpd->tba_addr;
1342
1343                         dev_err(dev->adev->dev,
1344                                 "Failure to set tba address. error %d.\n", err);
1345                         qpd->tba_addr = 0;
1346                         qpd->cwsr_kaddr = NULL;
1347                         return err;
1348                 }
1349
1350                 memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1351
1352                 kfd_process_set_trap_debug_flag(qpd, p->debug_trap_enabled);
1353
1354                 qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1355                 pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1356                         qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1357         }
1358
1359         p->has_cwsr = true;
1360
1361         return 0;
1362 }
1363
1364 static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
1365 {
1366         struct kfd_node *dev = pdd->dev;
1367         struct qcm_process_device *qpd = &pdd->qpd;
1368         uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
1369                         | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
1370                         | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
1371         struct kgd_mem *mem;
1372         void *kaddr;
1373         int ret;
1374
1375         if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
1376                 return 0;
1377
1378         /* cwsr_base is only set for dGPU */
1379         ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
1380                                       KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
1381         if (ret)
1382                 return ret;
1383
1384         qpd->cwsr_mem = mem;
1385         qpd->cwsr_kaddr = kaddr;
1386         qpd->tba_addr = qpd->cwsr_base;
1387
1388         memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);
1389
1390         kfd_process_set_trap_debug_flag(&pdd->qpd,
1391                                         pdd->process->debug_trap_enabled);
1392
1393         qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
1394         pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
1395                  qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
1396
1397         return 0;
1398 }
1399
1400 static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
1401 {
1402         struct kfd_node *dev = pdd->dev;
1403         struct qcm_process_device *qpd = &pdd->qpd;
1404
1405         if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
1406                 return;
1407
1408         kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, &qpd->cwsr_kaddr);
1409 }
1410
1411 void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
1412                                   uint64_t tba_addr,
1413                                   uint64_t tma_addr)
1414 {
1415         if (qpd->cwsr_kaddr) {
1416                 /* KFD trap handler is bound, record as second-level TBA/TMA
1417                  * in first-level TMA. First-level trap will jump to second.
1418                  */
1419                 uint64_t *tma =
1420                         (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1421                 tma[0] = tba_addr;
1422                 tma[1] = tma_addr;
1423         } else {
1424                 /* No trap handler bound, bind as first-level TBA/TMA. */
1425                 qpd->tba_addr = tba_addr;
1426                 qpd->tma_addr = tma_addr;
1427         }
1428 }
1429
1430 bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
1431 {
1432         int i;
1433
1434         /* On most GFXv9 GPUs, the retry mode in the SQ must match the
1435          * boot time retry setting. Mixing processes with different
1436          * XNACK/retry settings can hang the GPU.
1437          *
1438          * Different GPUs can have different noretry settings depending
1439          * on HW bugs or limitations. We need to find at least one
1440          * XNACK mode for this process that's compatible with all GPUs.
1441          * Fortunately GPUs with retry enabled (noretry=0) can run code
1442          * built for XNACK-off. On GFXv9 it may perform slower.
1443          *
1444          * Therefore applications built for XNACK-off can always be
1445          * supported and will be our fallback if any GPU does not
1446          * support retry.
1447          */
1448         for (i = 0; i < p->n_pdds; i++) {
1449                 struct kfd_node *dev = p->pdds[i]->dev;
1450
1451                 /* Only consider GFXv9 and higher GPUs. Older GPUs don't
1452                  * support the SVM APIs and don't need to be considered
1453                  * for the XNACK mode selection.
1454                  */
1455                 if (!KFD_IS_SOC15(dev))
1456                         continue;
1457                 /* Aldebaran can always support XNACK because it can support
1458                  * per-process XNACK mode selection. But let the dev->noretry
1459                  * setting still influence the default XNACK mode.
1460                  */
1461                 if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
1462                         if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
1463                                 pr_debug("SRIOV platform xnack not supported\n");
1464                                 return false;
1465                         }
1466                         continue;
1467                 }
1468
1469                 /* GFXv10 and later GPUs do not support shader preemption
1470                  * during page faults. This can lead to poor QoS for queue
1471                  * management and memory-manager-related preemptions or
1472                  * even deadlocks.
1473                  */
1474                 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
1475                         return false;
1476
1477                 if (dev->kfd->noretry)
1478                         return false;
1479         }
1480
1481         return true;
1482 }
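
/* Editor's note, illustrative usage sketch (not part of the original source):
 * the helper above answers "can every GPU this process has a pdd for run with
 * the requested XNACK setting?".  create_process() below uses it to pick the
 * default mode:
 *
 *     process->xnack_enabled = kfd_process_xnack_mode(process, false);
 *
 * A caller that needs retry faults enabled would instead check
 * kfd_process_xnack_mode(p, true) and reject the request if it returns false.
 */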
1483
1484 void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
1485                                      bool enabled)
1486 {
1487         if (qpd->cwsr_kaddr) {
1488                 uint64_t *tma =
1489                         (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1490                 tma[2] = enabled;
1491         }
1492 }
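
/* Editor's note (layout inferred from kfd_process_set_trap_handler() and
 * kfd_process_set_trap_debug_flag() above; illustrative only): the first
 * 64-bit words of the CWSR TMA page act as a small descriptor read by the
 * first-level trap handler:
 *
 *     tma[0] = second-level TBA (user trap handler entry point)
 *     tma[1] = second-level TMA (user trap handler memory area)
 *     tma[2] = debug-trap-enabled flag
 */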
1493
1494 /*
1495  * On return the kfd_process is fully operational and will be freed when the
1496  * mm is released
1497  */
1498 static struct kfd_process *create_process(const struct task_struct *thread)
1499 {
1500         struct kfd_process *process;
1501         struct mmu_notifier *mn;
1502         int err = -ENOMEM;
1503
1504         process = kzalloc(sizeof(*process), GFP_KERNEL);
1505         if (!process)
1506                 goto err_alloc_process;
1507
1508         kref_init(&process->ref);
1509         mutex_init(&process->mutex);
1510         process->mm = thread->mm;
1511         process->lead_thread = thread->group_leader;
1512         process->n_pdds = 0;
1513         process->queues_paused = false;
1514         INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
1515         INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
1516         process->last_restore_timestamp = get_jiffies_64();
1517         err = kfd_event_init_process(process);
1518         if (err)
1519                 goto err_event_init;
1520         process->is_32bit_user_mode = in_compat_syscall();
1521         process->debug_trap_enabled = false;
1522         process->debugger_process = NULL;
1523         process->exception_enable_mask = 0;
1524         atomic_set(&process->debugged_process_count, 0);
1525         sema_init(&process->runtime_enable_sema, 0);
1526
1527         process->pasid = kfd_pasid_alloc();
1528         if (process->pasid == 0) {
1529                 err = -ENOSPC;
1530                 goto err_alloc_pasid;
1531         }
1532
1533         err = pqm_init(&process->pqm, process);
1534         if (err != 0)
1535                 goto err_process_pqm_init;
1536
1537         /* Init process apertures */
1538         err = kfd_init_apertures(process);
1539         if (err != 0)
1540                 goto err_init_apertures;
1541
1542         /* Check XNACK support after PDDs are created in kfd_init_apertures */
1543         process->xnack_enabled = kfd_process_xnack_mode(process, false);
1544
1545         err = svm_range_list_init(process);
1546         if (err)
1547                 goto err_init_svm_range_list;
1548
1549         /* alloc_notifier needs to find the process in the hash table */
1550         hash_add_rcu(kfd_processes_table, &process->kfd_processes,
1551                         (uintptr_t)process->mm);
1552
1553         /* Prevent free_notifier from starting kfd_process_wq_release if
1554          * mmu_notifier_get fails because of a pending signal.
1555          */
1556         kref_get(&process->ref);
1557
1558         /* MMU notifier registration must be the last call that can fail
1559          * because after this point we cannot unwind the process creation.
1560          * After this point, mmu_notifier_put will trigger the cleanup by
1561          * dropping the last process reference in the free_notifier.
1562          */
1563         mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
1564         if (IS_ERR(mn)) {
1565                 err = PTR_ERR(mn);
1566                 goto err_register_notifier;
1567         }
1568         BUG_ON(mn != &process->mmu_notifier);
1569
1570         kfd_unref_process(process);
1571         get_task_struct(process->lead_thread);
1572
1573         INIT_WORK(&process->debug_event_workarea, debug_event_write_work_handler);
1574
1575         return process;
1576
1577 err_register_notifier:
1578         hash_del_rcu(&process->kfd_processes);
1579         svm_range_list_fini(process);
1580 err_init_svm_range_list:
1581         kfd_process_free_outstanding_kfd_bos(process);
1582         kfd_process_destroy_pdds(process);
1583 err_init_apertures:
1584         pqm_uninit(&process->pqm);
1585 err_process_pqm_init:
1586         kfd_pasid_free(process->pasid);
1587 err_alloc_pasid:
1588         kfd_event_free_process(process);
1589 err_event_init:
1590         mutex_destroy(&process->mutex);
1591         kfree(process);
1592 err_alloc_process:
1593         return ERR_PTR(err);
1594 }
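
/* Editor's note: the error labels above unwind in the reverse order of
 * construction (svm range list, apertures/pdds, pqm, pasid, events, mutex,
 * kfree).  That is also why mmu_notifier_get() has to be the last call that
 * can fail: once the notifier is registered, cleanup is only reachable
 * through free_notifier dropping the final process reference.
 */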
1595
1596 struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
1597                                                         struct kfd_process *p)
1598 {
1599         int i;
1600
1601         for (i = 0; i < p->n_pdds; i++)
1602                 if (p->pdds[i]->dev == dev)
1603                         return p->pdds[i];
1604
1605         return NULL;
1606 }
1607
1608 struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
1609                                                         struct kfd_process *p)
1610 {
1611         struct kfd_process_device *pdd = NULL;
1612
1613         if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
1614                 return NULL;
1615         pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
1616         if (!pdd)
1617                 return NULL;
1618
1619         pdd->dev = dev;
1620         INIT_LIST_HEAD(&pdd->qpd.queues_list);
1621         INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
1622         pdd->qpd.dqm = dev->dqm;
1623         pdd->qpd.pqm = &p->pqm;
1624         pdd->qpd.evicted = 0;
1625         pdd->qpd.mapped_gws_queue = false;
1626         pdd->process = p;
1627         pdd->bound = PDD_UNBOUND;
1628         pdd->already_dequeued = false;
1629         pdd->runtime_inuse = false;
1630         atomic64_set(&pdd->vram_usage, 0);
1631         pdd->sdma_past_activity_counter = 0;
1632         pdd->user_gpu_id = dev->id;
1633         atomic64_set(&pdd->evict_duration_counter, 0);
1634
1635         p->pdds[p->n_pdds++] = pdd;
1636         if (kfd_dbg_is_per_vmid_supported(pdd->dev))
1637                 pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap(
1638                                                         pdd->dev->adev,
1639                                                         false,
1640                                                         0);
1641
1642         /* Init idr used for memory handle translation */
1643         idr_init(&pdd->alloc_idr);
1644
1645         return pdd;
1646 }
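
/* Editor's note, hedged sketch of the expected call order, based only on the
 * helpers in this file (hypothetical caller shown for illustration):
 *
 *     pdd = kfd_create_process_device_data(dev, p);     // once per (p, dev)
 *     ret = kfd_process_device_init_vm(pdd, drm_file);  // attach render-node VM
 *     pdd = kfd_bind_process_to_device(dev, p);         // later lookups + runtime PM
 */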
1647
1648 /**
1649  * kfd_process_device_init_vm - Initialize a VM for a process-device
1650  *
1651  * @pdd: The process-device
1652  * @drm_file: Optional pointer to a DRM file descriptor
1653  *
1654  * If @drm_file is specified, it will be used to acquire the VM from
1655  * that file descriptor. If successful, the @pdd takes ownership of
1656  * the file descriptor.
1657  *
1658  * If @drm_file is NULL, a new VM is created.
1659  *
1660  * Returns 0 on success, -errno on failure.
1661  */
1662 int kfd_process_device_init_vm(struct kfd_process_device *pdd,
1663                                struct file *drm_file)
1664 {
1665         struct amdgpu_fpriv *drv_priv;
1666         struct amdgpu_vm *avm;
1667         struct kfd_process *p;
1668         struct dma_fence *ef;
1669         struct kfd_node *dev;
1670         int ret;
1671
1672         if (!drm_file)
1673                 return -EINVAL;
1674
1675         if (pdd->drm_priv)
1676                 return -EBUSY;
1677
1678         ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
1679         if (ret)
1680                 return ret;
1681         avm = &drv_priv->vm;
1682
1683         p = pdd->process;
1684         dev = pdd->dev;
1685
1686         ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
1687                                                      &p->kgd_process_info,
1688                                                      p->ef ? NULL : &ef);
1689         if (ret) {
1690                 dev_err(dev->adev->dev, "Failed to create process VM object\n");
1691                 return ret;
1692         }
1693
1694         if (!p->ef)
1695                 RCU_INIT_POINTER(p->ef, ef);
1696
1697         pdd->drm_priv = drm_file->private_data;
1698
1699         ret = kfd_process_device_reserve_ib_mem(pdd);
1700         if (ret)
1701                 goto err_reserve_ib_mem;
1702         ret = kfd_process_device_init_cwsr_dgpu(pdd);
1703         if (ret)
1704                 goto err_init_cwsr;
1705
1706         ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
1707         if (ret)
1708                 goto err_set_pasid;
1709
1710         pdd->drm_file = drm_file;
1711
1712         return 0;
1713
1714 err_set_pasid:
1715         kfd_process_device_destroy_cwsr_dgpu(pdd);
1716 err_init_cwsr:
1717         kfd_process_device_destroy_ib_mem(pdd);
1718 err_reserve_ib_mem:
1719         pdd->drm_priv = NULL;
1720         amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
1721
1722         return ret;
1723 }
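
/* Editor's note: pdd->drm_file is only assigned on the success path above, so
 * on any error the caller still owns the DRM file reference and must release
 * it itself; only a successful call transfers that ownership to the pdd, as
 * described in the kerneldoc.
 */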
1724
1725 /*
1726  * Direct the IOMMU to bind the process (specifically the pasid->mm)
1727  * to the device.
1728  * Unbinding occurs when the process dies or the device is removed.
1729  *
1730  * Assumes that the process lock is held.
1731  */
1732 struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
1733                                                         struct kfd_process *p)
1734 {
1735         struct kfd_process_device *pdd;
1736         int err;
1737
1738         pdd = kfd_get_process_device_data(dev, p);
1739         if (!pdd) {
1740                 dev_err(dev->adev->dev, "Process device data doesn't exist\n");
1741                 return ERR_PTR(-ENOMEM);
1742         }
1743
1744         if (!pdd->drm_priv)
1745                 return ERR_PTR(-ENODEV);
1746
1747         /*
1748          * Signal the runtime-PM system to auto-resume, and prevent
1749          * further runtime suspend from the time the device pdd is
1750          * created until it is destroyed.
1751          */
1752         if (!pdd->runtime_inuse) {
1753                 err = pm_runtime_get_sync(adev_to_drm(dev->adev)->dev);
1754                 if (err < 0) {
1755                         pm_runtime_put_autosuspend(adev_to_drm(dev->adev)->dev);
1756                         return ERR_PTR(err);
1757                 }
1758         }
1759
1760         /*
1761          * Make sure the runtime-PM usage counter is incremented only once
1762          * per pdd.
1763          */
1764         pdd->runtime_inuse = true;
1765
1766         return pdd;
1767 }
1768
1769 /* Create specific handle mapped to mem from process local memory idr
1770  * Assumes that the process lock is held.
1771  */
1772 int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
1773                                         void *mem)
1774 {
1775         return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
1776 }
1777
1778 /* Translate specific handle from process local memory idr
1779  * Assumes that the process lock is held.
1780  */
1781 void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
1782                                         int handle)
1783 {
1784         if (handle < 0)
1785                 return NULL;
1786
1787         return idr_find(&pdd->alloc_idr, handle);
1788 }
1789
1790 /* Remove specific handle from process local memory idr
1791  * Assumes that the process lock is held.
1792  */
1793 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
1794                                         int handle)
1795 {
1796         if (handle >= 0)
1797                 idr_remove(&pdd->alloc_idr, handle);
1798 }
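
/* Editor's note, minimal usage sketch for the three idr helpers above (the
 * process lock is assumed to be held, as their comments require):
 *
 *     int handle = kfd_process_device_create_obj_handle(pdd, mem);
 *     if (handle < 0)
 *             return handle;        // idr_alloc error
 *     ...
 *     mem = kfd_process_device_translate_handle(pdd, handle);
 *     kfd_process_device_remove_obj_handle(pdd, handle);
 */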
1799
1800 /* This increments the process->ref counter. */
1801 struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid)
1802 {
1803         struct kfd_process *p, *ret_p = NULL;
1804         unsigned int temp;
1805
1806         int idx = srcu_read_lock(&kfd_processes_srcu);
1807
1808         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
1809                 if (p->pasid == pasid) {
1810                         kref_get(&p->ref);
1811                         ret_p = p;
1812                         break;
1813                 }
1814         }
1815
1816         srcu_read_unlock(&kfd_processes_srcu, idx);
1817
1818         return ret_p;
1819 }
1820
1821 /* This increments the process->ref counter. */
1822 struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
1823 {
1824         struct kfd_process *p;
1825
1826         int idx = srcu_read_lock(&kfd_processes_srcu);
1827
1828         p = find_process_by_mm(mm);
1829         if (p)
1830                 kref_get(&p->ref);
1831
1832         srcu_read_unlock(&kfd_processes_srcu, idx);
1833
1834         return p;
1835 }
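
/* Editor's note: both lookup helpers above return with process->ref elevated,
 * so every successful lookup must be paired with kfd_unref_process(), e.g.:
 *
 *     p = kfd_lookup_process_by_pasid(pasid);
 *     if (!p)
 *             return;
 *     ...
 *     kfd_unref_process(p);
 *
 * This is the pattern kfd_process_close_interrupt_drain() uses later in this
 * file.
 */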
1836
1837 /* kfd_process_evict_queues - Evict all user queues of a process
1838  *
1839  * Eviction is reference-counted per process-device. This means multiple
1840  * evictions from different sources can be nested safely.
1841  */
1842 int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger)
1843 {
1844         int r = 0;
1845         int i;
1846         unsigned int n_evicted = 0;
1847
1848         for (i = 0; i < p->n_pdds; i++) {
1849                 struct kfd_process_device *pdd = p->pdds[i];
1850                 struct device *dev = pdd->dev->adev->dev;
1851
1852                 kfd_smi_event_queue_eviction(pdd->dev, p->lead_thread->pid,
1853                                              trigger);
1854
1855                 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
1856                                                             &pdd->qpd);
1857                 /* Evict returns -EIO if the HWS hangs or the ASIC is resetting. In that
1858                  * case we still want to mark all the queues as evicted, to prevent them
1859                  * from being added back, since their state was not actually saved.
1860                  */
1861                 if (r && r != -EIO) {
1862                         dev_err(dev, "Failed to evict process queues\n");
1863                         goto fail;
1864                 }
1865                 n_evicted++;
1866
1867                 pdd->dev->dqm->is_hws_hang = false;
1868         }
1869
1870         return r;
1871
1872 fail:
1873         /* To keep state consistent, roll back partial eviction by
1874          * restoring queues
1875          */
1876         for (i = 0; i < p->n_pdds; i++) {
1877                 struct kfd_process_device *pdd = p->pdds[i];
1878
1879                 if (n_evicted == 0)
1880                         break;
1881
1882                 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1883
1884                 if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1885                                                               &pdd->qpd))
1886                         dev_err(pdd->dev->adev->dev,
1887                                 "Failed to restore queues\n");
1888
1889                 n_evicted--;
1890         }
1891
1892         return r;
1893 }
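
/* Editor's note (hedged): the per-device eviction reference count mentioned
 * in the comment above is qpd->evicted, initialized to 0 in
 * kfd_create_process_device_data().  The dqm evict/restore ops are expected
 * to adjust it, which is what lets nested evictions (e.g. the TTM eviction
 * worker and kfd_suspend_all_processes()) overlap safely.
 */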
1894
1895 /* kfd_process_restore_queues - Restore all user queues of a process */
1896 int kfd_process_restore_queues(struct kfd_process *p)
1897 {
1898         int r, ret = 0;
1899         int i;
1900
1901         for (i = 0; i < p->n_pdds; i++) {
1902                 struct kfd_process_device *pdd = p->pdds[i];
1903                 struct device *dev = pdd->dev->adev->dev;
1904
1905                 kfd_smi_event_queue_restore(pdd->dev, p->lead_thread->pid);
1906
1907                 r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
1908                                                               &pdd->qpd);
1909                 if (r) {
1910                         dev_err(dev, "Failed to restore process queues\n");
1911                         if (!ret)
1912                                 ret = r;
1913                 }
1914         }
1915
1916         return ret;
1917 }
1918
1919 int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
1920 {
1921         int i;
1922
1923         for (i = 0; i < p->n_pdds; i++)
1924                 if (p->pdds[i] && gpu_id == p->pdds[i]->user_gpu_id)
1925                         return i;
1926         return -EINVAL;
1927 }
1928
1929 int
1930 kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
1931                             uint32_t *gpuid, uint32_t *gpuidx)
1932 {
1933         int i;
1934
1935         for (i = 0; i < p->n_pdds; i++)
1936                 if (p->pdds[i] && p->pdds[i]->dev == node) {
1937                         *gpuid = p->pdds[i]->user_gpu_id;
1938                         *gpuidx = i;
1939                         return 0;
1940                 }
1941         return -EINVAL;
1942 }
1943
1944 static int signal_eviction_fence(struct kfd_process *p)
1945 {
1946         struct dma_fence *ef;
1947         int ret;
1948
1949         rcu_read_lock();
1950         ef = dma_fence_get_rcu_safe(&p->ef);
1951         rcu_read_unlock();
1952         if (!ef)
1953                 return -EINVAL;
1954
1955         ret = dma_fence_signal(ef);
1956         dma_fence_put(ef);
1957
1958         return ret;
1959 }
1960
1961 static void evict_process_worker(struct work_struct *work)
1962 {
1963         int ret;
1964         struct kfd_process *p;
1965         struct delayed_work *dwork;
1966
1967         dwork = to_delayed_work(work);
1968
1969         /* Process termination destroys this worker thread. So during the
1970          * lifetime of this thread, kfd_process p will be valid
1971          */
1972         p = container_of(dwork, struct kfd_process, eviction_work);
1973
1974         pr_debug("Started evicting pasid 0x%x\n", p->pasid);
1975         ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_TTM);
1976         if (!ret) {
1977                 /* If another thread already signaled the eviction fence,
1978                  * that thread is responsible for stopping the queues and
1979                  * scheduling the restore work.
1980                  */
1981                 if (signal_eviction_fence(p) ||
1982                     mod_delayed_work(kfd_restore_wq, &p->restore_work,
1983                                      msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
1984                         kfd_process_restore_queues(p);
1985
1986                 pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
1987         } else
1988                 pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
1989 }
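
/* Editor's note on the condition in evict_process_worker() (hedged reading):
 * signal_eviction_fence() returns non-zero when the fence was missing or was
 * already signaled by another thread, and mod_delayed_work() returns true
 * when a restore was already pending.  In either case another path appears to
 * be driving the evict/restore cycle, so the worker restores the queues it
 * just evicted instead of leaving them stopped.
 */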
1990
1991 static int restore_process_helper(struct kfd_process *p)
1992 {
1993         int ret = 0;
1994
1995         /* VMs may not have been acquired yet during debugging. */
1996         if (p->kgd_process_info) {
1997                 ret = amdgpu_amdkfd_gpuvm_restore_process_bos(
1998                         p->kgd_process_info, &p->ef);
1999                 if (ret)
2000                         return ret;
2001         }
2002
2003         ret = kfd_process_restore_queues(p);
2004         if (!ret)
2005                 pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
2006         else
2007                 pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
2008
2009         return ret;
2010 }
2011
2012 static void restore_process_worker(struct work_struct *work)
2013 {
2014         struct delayed_work *dwork;
2015         struct kfd_process *p;
2016         int ret = 0;
2017
2018         dwork = to_delayed_work(work);
2019
2020         /* Process termination destroys this worker thread. So during the
2021          * lifetime of this thread, kfd_process p will be valid
2022          */
2023         p = container_of(dwork, struct kfd_process, restore_work);
2024         pr_debug("Started restoring pasid 0x%x\n", p->pasid);
2025
2026         /* Set last_restore_timestamp before the restore actually succeeds.
2027          * Otherwise it would have to be set by KGD (restore_process_bos)
2028          * before the KFD BOs are unreserved; if it were not, the process
2029          * could be evicted again before the timestamp is updated.
2030          * If the restore fails, the timestamp is set again on the next
2031          * attempt, so the minimum GPU quantum becomes
2032          * PROCESS_ACTIVE_TIME_MS minus the time needed to execute the two
2033          * calls below.
2034          */
2035
2036         p->last_restore_timestamp = get_jiffies_64();
2037
2038         ret = restore_process_helper(p);
2039         if (ret) {
2040                 pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
2041                          p->pasid, PROCESS_BACK_OFF_TIME_MS);
2042                 if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
2043                                      msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
2044                         kfd_process_restore_queues(p);
2045         }
2046 }
2047
2048 void kfd_suspend_all_processes(void)
2049 {
2050         struct kfd_process *p;
2051         unsigned int temp;
2052         int idx = srcu_read_lock(&kfd_processes_srcu);
2053
2054         WARN(debug_evictions, "Evicting all processes");
2055         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2056                 if (kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_TRIGGER_SUSPEND))
2057                         pr_err("Failed to suspend process 0x%x\n", p->pasid);
2058                 signal_eviction_fence(p);
2059         }
2060         srcu_read_unlock(&kfd_processes_srcu, idx);
2061 }
2062
2063 int kfd_resume_all_processes(void)
2064 {
2065         struct kfd_process *p;
2066         unsigned int temp;
2067         int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);
2068
2069         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2070                 if (restore_process_helper(p)) {
2071                         pr_err("Restore process %d failed during resume\n",
2072                                p->pasid);
2073                         ret = -EFAULT;
2074                 }
2075         }
2076         srcu_read_unlock(&kfd_processes_srcu, idx);
2077         return ret;
2078 }
2079
2080 int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
2081                           struct vm_area_struct *vma)
2082 {
2083         struct kfd_process_device *pdd;
2084         struct qcm_process_device *qpd;
2085
2086         if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
2087                 dev_err(dev->adev->dev, "Incorrect CWSR mapping size.\n");
2088                 return -EINVAL;
2089         }
2090
2091         pdd = kfd_get_process_device_data(dev, process);
2092         if (!pdd)
2093                 return -EINVAL;
2094         qpd = &pdd->qpd;
2095
2096         qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2097                                         get_order(KFD_CWSR_TBA_TMA_SIZE));
2098         if (!qpd->cwsr_kaddr) {
2099                 dev_err(dev->adev->dev,
2100                         "Error allocating per process CWSR buffer.\n");
2101                 return -ENOMEM;
2102         }
2103
2104         vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
2105                 | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
2106         /* Mapping pages to user process */
2107         return remap_pfn_range(vma, vma->vm_start,
2108                                PFN_DOWN(__pa(qpd->cwsr_kaddr)),
2109                                KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
2110 }
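
/* Editor's note (hedged): this mmap path backs the CWSR TBA/TMA area with
 * zeroed system pages and hands them to user space via remap_pfn_range(),
 * in contrast to the dGPU variant earlier in this file, where
 * kfd_process_device_init_cwsr_dgpu() maps a GPUVM buffer and records it in
 * qpd->cwsr_base.
 */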
2111
2112 /* assumes caller holds process lock. */
2113 int kfd_process_drain_interrupts(struct kfd_process_device *pdd)
2114 {
2115         uint32_t irq_drain_fence[8];
2116         uint8_t node_id = 0;
2117         int r = 0;
2118
2119         if (!KFD_IS_SOC15(pdd->dev))
2120                 return 0;
2121
2122         pdd->process->irq_drain_is_open = true;
2123
2124         memset(irq_drain_fence, 0, sizeof(irq_drain_fence));
2125         irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) |
2126                                                         KFD_IRQ_FENCE_CLIENTID;
2127         irq_drain_fence[3] = pdd->process->pasid;
2128
2129         /*
2130          * For GFX 9.4.3, send the NodeId also in IH cookie DW[3]
2131          */
2132         if (KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 3) ||
2133             KFD_GC_VERSION(pdd->dev->kfd) == IP_VERSION(9, 4, 4)) {
2134                 node_id = ffs(pdd->dev->interrupt_bitmap) - 1;
2135                 irq_drain_fence[3] |= node_id << 16;
2136         }
2137
2138         /* Send the drain fence so that stale IRQs already scheduled as KFD interrupts are processed. */
2139         if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev,
2140                                                      irq_drain_fence)) {
2141                 pdd->process->irq_drain_is_open = false;
2142                 return 0;
2143         }
2144
2145         r = wait_event_interruptible(pdd->process->wait_irq_drain,
2146                                      !READ_ONCE(pdd->process->irq_drain_is_open));
2147         if (r)
2148                 pdd->process->irq_drain_is_open = false;
2149
2150         return r;
2151 }
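
/* Editor's note: layout of the drain-fence IH cookie built above (eight DWs,
 * the rest zeroed):
 *
 *     DW0 = (KFD_IRQ_FENCE_SOURCEID << 8) | KFD_IRQ_FENCE_CLIENTID
 *     DW3 = pasid, with the node id in bits 16+ on GFX 9.4.3 / 9.4.4
 *
 * When the fence comes back through the interrupt ring, the interrupt handler
 * is expected to call kfd_process_close_interrupt_drain() below, which clears
 * irq_drain_is_open and wakes the waiter.
 */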
2152
2153 void kfd_process_close_interrupt_drain(unsigned int pasid)
2154 {
2155         struct kfd_process *p;
2156
2157         p = kfd_lookup_process_by_pasid(pasid);
2158
2159         if (!p)
2160                 return;
2161
2162         WRITE_ONCE(p->irq_drain_is_open, false);
2163         wake_up_all(&p->wait_irq_drain);
2164         kfd_unref_process(p);
2165 }
2166
2167 struct send_exception_work_handler_workarea {
2168         struct work_struct work;
2169         struct kfd_process *p;
2170         unsigned int queue_id;
2171         uint64_t error_reason;
2172 };
2173
2174 static void send_exception_work_handler(struct work_struct *work)
2175 {
2176         struct send_exception_work_handler_workarea *workarea;
2177         struct kfd_process *p;
2178         struct queue *q;
2179         struct mm_struct *mm;
2180         struct kfd_context_save_area_header __user *csa_header;
2181         uint64_t __user *err_payload_ptr;
2182         uint64_t cur_err;
2183         uint32_t ev_id;
2184
2185         workarea = container_of(work,
2186                                 struct send_exception_work_handler_workarea,
2187                                 work);
2188         p = workarea->p;
2189
2190         mm = get_task_mm(p->lead_thread);
2191
2192         if (!mm)
2193                 return;
2194
2195         kthread_use_mm(mm);
2196
2197         q = pqm_get_user_queue(&p->pqm, workarea->queue_id);
2198
2199         if (!q)
2200                 goto out;
2201
2202         csa_header = (void __user *)q->properties.ctx_save_restore_area_address;
2203
2204         get_user(err_payload_ptr, (uint64_t __user **)&csa_header->err_payload_addr);
2205         get_user(cur_err, err_payload_ptr);
2206         cur_err |= workarea->error_reason;
2207         put_user(cur_err, err_payload_ptr);
2208         get_user(ev_id, &csa_header->err_event_id);
2209
2210         kfd_set_event(p, ev_id);
2211
2212 out:
2213         kthread_unuse_mm(mm);
2214         mmput(mm);
2215 }
2216
2217 int kfd_send_exception_to_runtime(struct kfd_process *p,
2218                         unsigned int queue_id,
2219                         uint64_t error_reason)
2220 {
2221         struct send_exception_work_handler_workarea worker;
2222
2223         INIT_WORK_ONSTACK(&worker.work, send_exception_work_handler);
2224
2225         worker.p = p;
2226         worker.queue_id = queue_id;
2227         worker.error_reason = error_reason;
2228
2229         schedule_work(&worker.work);
2230         flush_work(&worker.work);
2231         destroy_work_on_stack(&worker.work);
2232
2233         return 0;
2234 }
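
/* Editor's note: because the work item above lives on the caller's stack
 * (INIT_WORK_ONSTACK + flush_work + destroy_work_on_stack), the call is
 * effectively synchronous.  It is dispatched as work only so the CSA header
 * can be written from a kernel-thread context that temporarily adopts the
 * process mm via kthread_use_mm() in send_exception_work_handler().
 */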
2235
2236 struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *p, uint32_t gpu_id)
2237 {
2238         int i;
2239
2240         if (gpu_id) {
2241                 for (i = 0; i < p->n_pdds; i++) {
2242                         struct kfd_process_device *pdd = p->pdds[i];
2243
2244                         if (pdd->user_gpu_id == gpu_id)
2245                                 return pdd;
2246                 }
2247         }
2248         return NULL;
2249 }
2250
2251 int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id)
2252 {
2253         int i;
2254
2255         if (!actual_gpu_id)
2256                 return 0;
2257
2258         for (i = 0; i < p->n_pdds; i++) {
2259                 struct kfd_process_device *pdd = p->pdds[i];
2260
2261                 if (pdd->dev->id == actual_gpu_id)
2262                         return pdd->user_gpu_id;
2263         }
2264         return -EINVAL;
2265 }
2266
2267 #if defined(CONFIG_DEBUG_FS)
2268
2269 int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
2270 {
2271         struct kfd_process *p;
2272         unsigned int temp;
2273         int r = 0;
2274
2275         int idx = srcu_read_lock(&kfd_processes_srcu);
2276
2277         hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
2278                 seq_printf(m, "Process %d PASID 0x%x:\n",
2279                            p->lead_thread->tgid, p->pasid);
2280
2281                 mutex_lock(&p->mutex);
2282                 r = pqm_debugfs_mqds(m, &p->pqm);
2283                 mutex_unlock(&p->mutex);
2284
2285                 if (r)
2286                         break;
2287         }
2288
2289         srcu_read_unlock(&kfd_processes_srcu, idx);
2290
2291         return r;
2292 }
2293
2294 #endif