/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_processes).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

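/* Sleepable RCU protecting read-side walks of kfd_processes_table;
 * writers remove entries under kfd_processes_mutex and then call
 * synchronize_srcu() before the process can go away.
 */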
DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread);
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

struct kfd_procfs_tree {
        struct kobject *kobj;
};

static struct kfd_procfs_tree procfs;

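/* sysfs show() callback for the per-process procfs nodes; "pasid" is
 * currently the only attribute exposed.
 */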
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
{
        int val = 0;

        if (strcmp(attr->name, "pasid") == 0) {
                struct kfd_process *p = container_of(attr, struct kfd_process,
                                                     attr_pasid);
                val = p->pasid;
        } else {
                pr_err("Invalid attribute\n");
                return -EINVAL;
        }

        return snprintf(buffer, PAGE_SIZE, "%d\n", val);
}

static void kfd_procfs_kobj_release(struct kobject *kobj)
{
        kfree(kobj);
}

static const struct sysfs_ops kfd_procfs_ops = {
        .show = kfd_procfs_show,
};

static struct kobj_type procfs_type = {
        .release = kfd_procfs_kobj_release,
        .sysfs_ops = &kfd_procfs_ops,
};

void kfd_procfs_init(void)
{
        int ret = 0;

        procfs.kobj = kfd_alloc_struct(procfs.kobj);
        if (!procfs.kobj)
                return;

        ret = kobject_init_and_add(procfs.kobj, &procfs_type,
                                   &kfd_device->kobj, "proc");
        if (ret) {
                pr_warn("Could not create procfs proc folder\n");
                /* If we fail to create the procfs, clean up */
                kfd_procfs_shutdown();
        }
}

void kfd_procfs_shutdown(void)
{
        if (procfs.kobj) {
                kobject_del(procfs.kobj);
                kobject_put(procfs.kobj);
                procfs.kobj = NULL;
        }
}

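/* Create the module-global workqueues: kfd_process_wq runs deferred
 * process teardown, while kfd_restore_wq (ordered) serializes restores
 * of evicted processes as described above.
 */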
int kfd_process_create_wq(void)
{
        if (!kfd_process_wq)
                kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
        if (!kfd_restore_wq)
                kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

        if (!kfd_process_wq || !kfd_restore_wq) {
                kfd_process_destroy_wq();
                return -ENOMEM;
        }

        return 0;
}

void kfd_process_destroy_wq(void)
{
        if (kfd_process_wq) {
                destroy_workqueue(kfd_process_wq);
                kfd_process_wq = NULL;
        }
        if (kfd_restore_wq) {
                destroy_workqueue(kfd_restore_wq);
                kfd_restore_wq = NULL;
        }
}

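/* Undo kfd_process_alloc_gpuvm: unmap the BO from the device's VM and
 * free the underlying memory.
 */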
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
                        struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;

        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *      This function should only be called right after the process
 *      is created, while kfd_processes_mutex is still being held,
 *      to avoid concurrency. Because of that exclusivity, we do
 *      not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
                                   uint64_t gpu_va, uint32_t size,
                                   uint32_t flags, void **kptr)
{
        struct kfd_dev *kdev = pdd->dev;
        struct kgd_mem *mem = NULL;
        int handle;
        int err;

        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
                                                 pdd->vm, &mem, NULL, flags);
        if (err)
                goto err_alloc_mem;

        err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
        if (err)
                goto err_map_mem;

        err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
        if (err) {
                pr_debug("Sync memory failed, wait interrupted by user signal\n");
                goto sync_memory_failed;
        }

        /* Create an obj handle so kfd_process_device_remove_obj_handle
         * will take care of the bo removal when the process finishes.
         * We do not need to take p->mutex, because the process is just
         * created and the ioctls have not had the chance to run.
         */
        handle = kfd_process_device_create_obj_handle(pdd, mem);

        if (handle < 0) {
                err = handle;
                goto free_gpuvm;
        }

        if (kptr) {
                err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
                                (struct kgd_mem *)mem, kptr, NULL);
                if (err) {
                        pr_debug("Map GTT BO to kernel failed\n");
                        goto free_obj_handle;
                }
        }

        return err;

free_obj_handle:
        kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
        kfd_process_free_gpuvm(mem, pdd);
        return err;

err_map_mem:
        amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
        *kptr = NULL;
        return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *      process for IB usage. The memory reserved is for KFD to submit
 *      IBs to AMDGPU from kernel mode. If the memory is reserved
 *      successfully, ib_kaddr will have the CPU/kernel
 *      address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                         ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
                         ALLOC_MEM_FLAGS_WRITABLE |
                         ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (qpd->ib_kaddr || !qpd->ib_base)
                return 0;

        /* ib_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
                                      &kaddr);
        if (ret)
                return ret;

        qpd->ib_kaddr = kaddr;

        return 0;
}

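/* Called on each open of /dev/kfd. Returns the kfd_process already
 * associated with this mm, or creates a new one (including its CWSR
 * setup and procfs entries) under kfd_processes_mutex.
 */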
struct kfd_process *kfd_create_process(struct file *filep)
{
        struct kfd_process *process;
        struct task_struct *thread = current;
        int ret;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        /*
         * Take the kfd_processes_mutex before starting process creation
         * so there won't be a case where two threads of the same process
         * create two kfd_process structures.
         */
        mutex_lock(&kfd_processes_mutex);

        /* A prior open of /dev/kfd could have already created the process. */
        process = find_process(thread);
        if (process) {
                pr_debug("Process already found\n");
        } else {
                process = create_process(thread);
                if (IS_ERR(process))
                        goto out;

                ret = kfd_process_init_cwsr_apu(process, filep);
                if (ret) {
                        process = ERR_PTR(ret);
                        goto out;
                }

                if (!procfs.kobj)
                        goto out;

                process->kobj = kfd_alloc_struct(process->kobj);
                if (!process->kobj) {
                        pr_warn("Creating procfs kobject failed\n");
                        goto out;
                }
                ret = kobject_init_and_add(process->kobj, &procfs_type,
                                           procfs.kobj, "%d",
                                           (int)process->lead_thread->pid);
                if (ret) {
                        pr_warn("Creating procfs pid directory failed\n");
                        goto out;
                }

                process->attr_pasid.name = "pasid";
                process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
                sysfs_attr_init(&process->attr_pasid);
                ret = sysfs_create_file(process->kobj, &process->attr_pasid);
                if (ret)
                        pr_warn("Creating pasid for pid %d failed\n",
                                        (int)process->lead_thread->pid);
        }
out:
        mutex_unlock(&kfd_processes_mutex);

        return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
        struct kfd_process *process;

        if (!thread->mm)
                return ERR_PTR(-EINVAL);

        /* Only the pthreads threading model is supported. */
        if (thread->group_leader->mm != thread->mm)
                return ERR_PTR(-EINVAL);

        process = find_process(thread);
        if (!process)
                return ERR_PTR(-EINVAL);

        return process;
}

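/* Caller must hold kfd_processes_mutex or an SRCU read lock on
 * kfd_processes_srcu while iterating the table.
 */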
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *process;

        hash_for_each_possible_rcu(kfd_processes_table, process,
                                        kfd_processes, (uintptr_t)mm)
                if (process->mm == mm)
                        return process;

        return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
        struct kfd_process *p;
        int idx;

        idx = srcu_read_lock(&kfd_processes_srcu);
        p = find_process_by_mm(thread->mm);
        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

void kfd_unref_process(struct kfd_process *p)
{
        kref_put(&p->ref, kfd_process_ref_release);
}

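/* Free every BO tracked in this device's allocation IDR, first
 * unmapping it from the VMs of all devices it was mapped into.
 */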
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;

        /*
         * Remove all handles from idr and release appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
                struct kfd_process_device *peer_pdd;

                list_for_each_entry(peer_pdd, &p->per_device_data,
                                    per_device_list) {
                        if (!peer_pdd->vm)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                                peer_pdd->dev->kgd, mem, peer_pdd->vm);
                }

                amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
                kfd_process_device_remove_obj_handle(pdd, id);
        }
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                kfd_process_device_free_bos(pdd);
}

static void kfd_process_destroy_pdds(struct kfd_process *p)
{
        struct kfd_process_device *pdd, *temp;

        list_for_each_entry_safe(pdd, temp, &p->per_device_data,
                                 per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
                                pdd->dev->id, p->pasid);

                if (pdd->drm_file) {
                        amdgpu_amdkfd_gpuvm_release_process_vm(
                                        pdd->dev->kgd, pdd->vm);
                        fput(pdd->drm_file);
                } else if (pdd->vm) {
                        amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
                }

                list_del(&pdd->per_device_list);

                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                get_order(KFD_CWSR_TBA_TMA_SIZE));

                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);

                kfree(pdd);
        }
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);

        /* Remove the procfs files */
        if (p->kobj) {
                sysfs_remove_file(p->kobj, &p->attr_pasid);
                kobject_del(p->kobj);
                kobject_put(p->kobj);
                p->kobj = NULL;
        }

        kfd_iommu_unbind_process(p);

        kfd_process_free_outstanding_kfd_bos(p);

        kfd_process_destroy_pdds(p);
        dma_fence_put(p->ef);

        kfd_event_free_process(p);

        kfd_pasid_free(p->pasid);
        kfd_free_process_doorbells(p);

        mutex_destroy(&p->mutex);

        put_task_struct(p->lead_thread);

        kfree(p);
}

static void kfd_process_ref_release(struct kref *ref)
{
        struct kfd_process *p = container_of(ref, struct kfd_process, ref);

        INIT_WORK(&p->release_work, kfd_process_wq_release);
        queue_work(kfd_process_wq, &p->release_work);
}

static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
        kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}

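/* MMU notifier release callback: runs when the process address space
 * is torn down, i.e. at process exit. This is KFD's main teardown
 * hook: it unhashes the process, destroys its queues and drops the
 * notifier reference.
 */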
static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
{
        struct kfd_process *p;
        struct kfd_process_device *pdd = NULL;

        /*
         * The kfd_process structure cannot be freed because the
         * mmu_notifier srcu is read locked
         */
        p = container_of(mn, struct kfd_process, mmu_notifier);
        if (WARN_ON(p->mm != mm))
                return;

        mutex_lock(&kfd_processes_mutex);
        hash_del_rcu(&p->kfd_processes);
        mutex_unlock(&kfd_processes_mutex);
        synchronize_srcu(&kfd_processes_srcu);

        cancel_delayed_work_sync(&p->eviction_work);
        cancel_delayed_work_sync(&p->restore_work);

        mutex_lock(&p->mutex);

        /* Iterate over all process device data structures. If a pdd is
         * in debug mode, we must force unregistration first; only then
         * can the queues be destroyed.
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;

                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
                        if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
                                kfd_dbgmgr_destroy(dev->dbgmgr);
                                dev->dbgmgr = NULL;
                        }
                }
                mutex_unlock(kfd_get_dbgmgr_mutex());
        }

        kfd_process_dequeue_from_all_devices(p);
        pqm_uninit(&p->pqm);

        /* Indicate to other users that MM is no longer valid */
        p->mm = NULL;

        mutex_unlock(&p->mutex);

        mmu_notifier_put(&p->mmu_notifier);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
        .release = kfd_process_notifier_release,
        .free_notifier = kfd_process_free_notifier,
};

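/* On APUs the CWSR trap handler (TBA) and trap memory area (TMA) live
 * in reserved system memory. Map them into the user address space for
 * each device by mmap'ing the /dev/kfd file with a special offset,
 * then copy the trap handler code into the mapping.
 */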
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
        unsigned long offset;
        struct kfd_process_device *pdd;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kfd_dev *dev = pdd->dev;
                struct qcm_process_device *qpd = &pdd->qpd;

                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;

                offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
                        << PAGE_SHIFT;
                qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
                        KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
                        MAP_SHARED, offset);

                if (IS_ERR_VALUE(qpd->tba_addr)) {
                        int err = qpd->tba_addr;

                        pr_err("Failure to set tba address. error %d.\n", err);
                        qpd->tba_addr = 0;
                        qpd->cwsr_kaddr = NULL;
                        return err;
                }

                memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

                qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
                pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
                        qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
        }

        return 0;
}

static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        struct qcm_process_device *qpd = &pdd->qpd;
        uint32_t flags = ALLOC_MEM_FLAGS_GTT |
                ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
        void *kaddr;
        int ret;

        if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
                return 0;

        /* cwsr_base is only set for dGPU */
        ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
                                      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
        if (ret)
                return ret;

        qpd->cwsr_kaddr = kaddr;
        qpd->tba_addr = qpd->cwsr_base;

        memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

        qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
        pr_debug("set tba: 0x%llx, tma: 0x%llx, cwsr_kaddr: %p for pqm.\n",
                 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

        return 0;
}

/*
 * On return the kfd_process is fully operational and will be freed when the
 * mm is released
 */
static struct kfd_process *create_process(const struct task_struct *thread)
{
        struct kfd_process *process;
        int err = -ENOMEM;

        process = kzalloc(sizeof(*process), GFP_KERNEL);
        if (!process)
                goto err_alloc_process;

        kref_init(&process->ref);
        mutex_init(&process->mutex);
        process->mm = thread->mm;
        process->lead_thread = thread->group_leader;
        INIT_LIST_HEAD(&process->per_device_data);
        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();
        kfd_event_init_process(process);
        process->is_32bit_user_mode = in_compat_syscall();

        process->pasid = kfd_pasid_alloc();
        if (process->pasid == 0)
                goto err_alloc_pasid;

        if (kfd_alloc_process_doorbells(process) < 0)
                goto err_alloc_doorbells;

        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;

        /* Init process apertures */
        err = kfd_init_apertures(process);
        if (err != 0)
                goto err_init_apertures;

        /* Must be last; once the notifier is registered, destruction
         * has to go through the release path.
         */
        process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
        err = mmu_notifier_register(&process->mmu_notifier, process->mm);
        if (err)
                goto err_register_notifier;

        get_task_struct(process->lead_thread);
        hash_add_rcu(kfd_processes_table, &process->kfd_processes,
                        (uintptr_t)process->mm);

        return process;

err_register_notifier:
        kfd_process_free_outstanding_kfd_bos(process);
        kfd_process_destroy_pdds(process);
err_init_apertures:
        pqm_uninit(&process->pqm);
err_process_pqm_init:
        kfd_free_process_doorbells(process);
err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
err_alloc_pasid:
        mutex_destroy(&process->mutex);
        kfree(process);
err_alloc_process:
        return ERR_PTR(err);
}

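/* Mark the doorbells that SOC15 ASICs reserve for SDMA, IH and VCN in
 * the per-process doorbell bitmap, so they are never handed out to
 * user queues. Pre-SOC15 ASICs need no bitmap.
 */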
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
                        struct kfd_dev *dev)
{
        unsigned int i;
        int range_start = dev->shared_resources.non_cp_doorbells_start;
        int range_end = dev->shared_resources.non_cp_doorbells_end;

        if (!KFD_IS_SOC15(dev->device_info->asic_family))
                return 0;

        qpd->doorbell_bitmap =
                kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                     BITS_PER_BYTE), GFP_KERNEL);
        if (!qpd->doorbell_bitmap)
                return -ENOMEM;

        /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
        pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
        pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
                        range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                        range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);

        for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
                if (i >= range_start && i <= range_end) {
                        set_bit(i, qpd->doorbell_bitmap);
                        set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
                                qpd->doorbell_bitmap);
                }
        }

        return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list)
                if (pdd->dev == dev)
                        return pdd;

        return NULL;
}

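/* Allocate and initialize the per-device data (pdd) for a process and
 * link it into the process's device list. Returns NULL on failure.
 */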
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd = NULL;

        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;

        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
                kfree(pdd);
                return NULL;
        }

        pdd->dev = dev;
        INIT_LIST_HEAD(&pdd->qpd.queues_list);
        INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
        pdd->qpd.dqm = dev->dqm;
        pdd->qpd.pqm = &p->pqm;
        pdd->qpd.evicted = 0;
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
        list_add(&pdd->per_device_list, &p->per_device_data);

        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);

        return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file)
{
        struct kfd_process *p;
        struct kfd_dev *dev;
        int ret;

        if (pdd->vm)
                return drm_file ? -EBUSY : 0;

        p = pdd->process;
        dev = pdd->dev;

        if (drm_file)
                ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
                        dev->kgd, drm_file, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        else
                ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
                        &pdd->vm, &p->kgd_process_info, &p->ef);
        if (ret) {
                pr_err("Failed to create process VM object\n");
                return ret;
        }

        amdgpu_vm_set_task_info(pdd->vm);

        ret = kfd_process_device_reserve_ib_mem(pdd);
        if (ret)
                goto err_reserve_ib_mem;
        ret = kfd_process_device_init_cwsr_dgpu(pdd);
        if (ret)
                goto err_init_cwsr;

        pdd->drm_file = drm_file;

        return 0;

err_init_cwsr:
err_reserve_ib_mem:
        kfd_process_device_free_bos(pdd);
        if (!drm_file)
                amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
        pdd->vm = NULL;

        return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                        struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int err;

        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
                pr_err("Process device data doesn't exist\n");
                return ERR_PTR(-ENOMEM);
        }

        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
                return ERR_PTR(err);

        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
                return ERR_PTR(err);

        return pdd;
}

struct kfd_process_device *kfd_get_first_process_device_data(
                                                struct kfd_process *p)
{
        return list_first_entry(&p->per_device_data,
                                struct kfd_process_device,
                                per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd)
{
        if (list_is_last(&pdd->per_device_list, &p->per_device_data))
                return NULL;
        return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
        return !list_empty(&p->per_device_data);
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                        void *mem)
{
        return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
                                        int handle)
{
        if (handle < 0)
                return NULL;

        return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle)
{
        if (handle >= 0)
                idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
        struct kfd_process *p, *ret_p = NULL;
        unsigned int temp;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (p->pasid == pasid) {
                        kref_get(&p->ref);
                        ret_p = p;
                        break;
                }
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
        struct kfd_process *p;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        p = find_process_by_mm(mm);
        if (p)
                kref_get(&p->ref);

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r = 0;
        unsigned int n_evicted = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
                        pr_err("Failed to evict process queues\n");
                        goto fail;
                }
                n_evicted++;
        }

        return r;

fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd))
                        pr_err("Failed to restore queues\n");

                n_evicted--;
        }

        return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
        struct kfd_process_device *pdd;
        int r, ret = 0;

        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
                        pr_err("Failed to restore process queues\n");
                        if (!ret)
                                ret = r;
                }
        }

        return ret;
}

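/* Delayed work that quiesces all user queues of a process when its
 * BOs are about to be evicted, signals the eviction fence, and then
 * schedules restore work to bring the process back.
 */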
static void evict_process_worker(struct work_struct *work)
{
        int ret;
        struct kfd_process *p;
        struct delayed_work *dwork;

        dwork = to_delayed_work(work);

        /* Process termination cancels this work item. So for the
         * lifetime of this work, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, eviction_work);
        WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
                  "Eviction fence mismatch\n");

        /* A narrow window of overlap between restore and evict work
         * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
         * unreserves the KFD BOs, the process can be evicted again, but
         * restore still has a few more steps to finish. So wait for any
         * previous restore work to complete.
         */
        flush_delayed_work(&p->restore_work);

        pr_debug("Started evicting pasid 0x%x\n", p->pasid);
        ret = kfd_process_evict_queues(p);
        if (!ret) {
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
                queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

                pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
        } else
                pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}

static void restore_process_worker(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct kfd_process *p;
        int ret = 0;

        dwork = to_delayed_work(work);

        /* Process termination cancels this work item. So for the
         * lifetime of this work, kfd_process p will be valid
         */
        p = container_of(dwork, struct kfd_process, restore_work);
        pr_debug("Started restoring pasid 0x%x\n", p->pasid);

        /* Set last_restore_timestamp before the restoration succeeds.
         * Otherwise it would have to be set by KGD (restore_process_bos)
         * before KFD BOs are unreserved. If not, the process could be
         * evicted again before the timestamp is set.
         * If restore fails, the timestamp will be set again in the next
         * attempt. This would mean that the minimum GPU quantum would be
         * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
         * functions)
         */

        p->last_restore_timestamp = get_jiffies_64();
        ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
                                                     &p->ef);
        if (ret) {
                pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
                ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
                                msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
                WARN(!ret, "reschedule restore work failed\n");
                return;
        }

        ret = kfd_process_restore_queues(p);
        if (!ret)
                pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
        else
                pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}

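/* Evict the queues of every known process, e.g. when the device is
 * suspended. Pending evict/restore work is cancelled first, and the
 * eviction fences are signaled so that waiters are not left hanging.
 */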
void kfd_suspend_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                cancel_delayed_work_sync(&p->eviction_work);
                cancel_delayed_work_sync(&p->restore_work);

                if (kfd_process_evict_queues(p))
                        pr_err("Failed to suspend process 0x%x\n", p->pasid);
                dma_fence_signal(p->ef);
                dma_fence_put(p->ef);
                p->ef = NULL;
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
        struct kfd_process *p;
        unsigned int temp;
        int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
                        pr_err("Restore process %d failed during resume\n",
                               p->pasid);
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&kfd_processes_srcu, idx);
        return ret;
}

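/* Back the CWSR reserved-memory VMA (used on APUs, see
 * kfd_process_init_cwsr_apu) with zeroed pages and map it into the
 * user process.
 */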
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                          struct vm_area_struct *vma)
{
        struct kfd_process_device *pdd;
        struct qcm_process_device *qpd;

        if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
                pr_err("Incorrect CWSR mapping size.\n");
                return -EINVAL;
        }

        pdd = kfd_get_process_device_data(dev, process);
        if (!pdd)
                return -EINVAL;
        qpd = &pdd->qpd;

        qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(KFD_CWSR_TBA_TMA_SIZE));
        if (!qpd->cwsr_kaddr) {
                pr_err("Error allocating per process CWSR buffer.\n");
                return -ENOMEM;
        }

        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
                | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
        /* Mapping pages to user process */
        return remap_pfn_range(vma, vma->vm_start,
                               PFN_DOWN(__pa(qpd->cwsr_kaddr)),
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

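/* Invalidate the GPU TLBs for this process: per-VMID when the HW
 * scheduler is disabled (a VMID is only assigned once the first queue
 * is created), per-PASID otherwise.
 */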
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
        struct kfd_dev *dev = pdd->dev;
        const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

        if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
                /* Nothing to flush until a VMID is assigned, which
                 * only happens when the first queue is created.
                 */
                if (pdd->qpd.vmid)
                        f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
        } else {
                f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
        }
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
        struct kfd_process *p;
        unsigned int temp;
        int r = 0;

        int idx = srcu_read_lock(&kfd_processes_srcu);

        hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
                seq_printf(m, "Process %d PASID 0x%x:\n",
                           p->lead_thread->tgid, p->pasid);

                mutex_lock(&p->mutex);
                r = pqm_debugfs_mqds(m, &p->pqm);
                mutex_unlock(&p->mutex);

                if (r)
                        break;
        }

        srcu_read_unlock(&kfd_processes_srcu, idx);

        return r;
}

#endif