drivers/gpu/drm/panthor/panthor_device.c
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

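/* Grab the mandatory core clock plus the optional "stacks" and "coregroup"
 * clocks. All clocks are acquired through devres, so no explicit release is
 * needed on the teardown path.
 */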
static int panthor_clk_init(struct panthor_device *ptdev)
{
        ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
        if (IS_ERR(ptdev->clks.core))
                return dev_err_probe(ptdev->base.dev,
                                     PTR_ERR(ptdev->clks.core),
                                     "get 'core' clock failed");

        ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
        if (IS_ERR(ptdev->clks.stacks))
                return dev_err_probe(ptdev->base.dev,
                                     PTR_ERR(ptdev->clks.stacks),
                                     "get 'stacks' clock failed");

        ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
        if (IS_ERR(ptdev->clks.coregroup))
                return dev_err_probe(ptdev->base.dev,
                                     PTR_ERR(ptdev->clks.coregroup),
                                     "get 'coregroup' clock failed");

        drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
        return 0;
}

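/**
 * panthor_device_unplug() - Unplug the device and reject further HW access.
 * @ptdev: Device to unplug.
 *
 * Called from both the reset worker and the platform device remove callback.
 * Concurrent callers are serialized with ptdev->unplug.lock, and late callers
 * block on ptdev->unplug.done until the first caller has finished tearing
 * everything down.
 */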
void panthor_device_unplug(struct panthor_device *ptdev)
{
        /* This function can be called from two different paths: the reset work
         * and the platform device remove callback. drm_dev_unplug() doesn't
         * deal with concurrent callers, so we have to protect drm_dev_unplug()
         * calls with our own lock, and bail out if the device is already
         * unplugged.
         */
        mutex_lock(&ptdev->unplug.lock);
        if (drm_dev_is_unplugged(&ptdev->base)) {
                /* Someone beat us to it, release the lock and wait for the
                 * unplug operation to be reported as done.
                 */
                mutex_unlock(&ptdev->unplug.lock);
                wait_for_completion(&ptdev->unplug.done);
                return;
        }

        /* Call drm_dev_unplug() so any access to HW blocks happening after
         * that point gets rejected.
         */
        drm_dev_unplug(&ptdev->base);

        /* We do the rest of the unplug with the unplug lock released;
         * future callers will wait on ptdev->unplug.done anyway.
         */
        mutex_unlock(&ptdev->unplug.lock);

        drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

        /* Now, try to cleanly shut down the GPU before the device resources
         * get reclaimed.
         */
        panthor_sched_unplug(ptdev);
        panthor_fw_unplug(ptdev);
        panthor_mmu_unplug(ptdev);
        panthor_gpu_unplug(ptdev);

        pm_runtime_dont_use_autosuspend(ptdev->base.dev);
        pm_runtime_put_sync_suspend(ptdev->base.dev);

        /* If PM is disabled, we need to call the suspend handler manually. */
        if (!IS_ENABLED(CONFIG_PM))
                panthor_device_suspend(ptdev->base.dev);

        /* Report the unplug operation as done to unblock concurrent
         * panthor_device_unplug() callers.
         */
        complete_all(&ptdev->unplug.done);
}

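/* drmm cleanup action: make sure the reset worker has stopped and its
 * ordered workqueue is destroyed before the DRM device goes away.
 */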
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
        struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

        cancel_work_sync(&ptdev->reset.work);
        destroy_workqueue(ptdev->reset.wq);
}

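/* Reset worker: quiesce the scheduler, FW and MMU, soft-reset the GPU and
 * bring everything back up. If the MCU fails to reboot after the reset, the
 * device is unplugged and becomes unusable.
 */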
static void panthor_device_reset_work(struct work_struct *work)
{
        struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
        int ret = 0, cookie;

        if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
                /*
                 * No need for a reset as the device has been (or will be)
                 * powered down
                 */
                atomic_set(&ptdev->reset.pending, 0);
                return;
        }

        if (!drm_dev_enter(&ptdev->base, &cookie))
                return;

        panthor_sched_pre_reset(ptdev);
        panthor_fw_pre_reset(ptdev, true);
        panthor_mmu_pre_reset(ptdev);
        panthor_gpu_soft_reset(ptdev);
        panthor_gpu_l2_power_on(ptdev);
        panthor_mmu_post_reset(ptdev);
        ret = panthor_fw_post_reset(ptdev);
        if (ret)
                goto out_dev_exit;

        atomic_set(&ptdev->reset.pending, 0);
        panthor_sched_post_reset(ptdev);

out_dev_exit:
        drm_dev_exit(cookie);

        if (ret) {
                panthor_device_unplug(ptdev);
                drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
        }
}

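/* The scheduler is the last component set up in panthor_device_init(), so
 * its presence is used as an "everything is initialized" marker.
 */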
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
        return !!ptdev->scheduler;
}

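/* drmm cleanup action releasing the dummy "latest flush" page. */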
static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
        __free_page(data);
}

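/**
 * panthor_device_init() - Initialize a panthor device.
 * @ptdev: Device to initialize.
 *
 * Sets up the clocks, devfreq, MMIO mapping and runtime PM, initializes the
 * GPU, MMU, FW and scheduler blocks, then registers the DRM device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */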
int panthor_device_init(struct panthor_device *ptdev)
{
        u32 *dummy_page_virt;
        struct resource *res;
        struct page *p;
        int ret;

        ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

        init_completion(&ptdev->unplug.done);
        ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
        if (ret)
                return ret;

        ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
        if (ret)
                return ret;

        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
        p = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!p)
                return -ENOMEM;

        ptdev->pm.dummy_latest_flush = p;
        dummy_page_virt = page_address(p);
        ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
                                       ptdev->pm.dummy_latest_flush);
        if (ret)
                return ret;

        /*
         * Set the dummy page holding the latest flush to 1. This will cause
         * the flush to be avoided, as we know it isn't necessary if the
         * submission happens while the dummy page is mapped. Zero cannot be
         * used because that means 'always flush'.
         */
        *dummy_page_virt = 1;

        INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
        ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
        if (!ptdev->reset.wq)
                return -ENOMEM;

        ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
        if (ret)
                return ret;

        ret = panthor_clk_init(ptdev);
        if (ret)
                return ret;

        ret = panthor_devfreq_init(ptdev);
        if (ret)
                return ret;

        ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
                                                              0, &res);
        if (IS_ERR(ptdev->iomem))
                return PTR_ERR(ptdev->iomem);

        ptdev->phys_addr = res->start;

        ret = devm_pm_runtime_enable(ptdev->base.dev);
        if (ret)
                return ret;

        ret = pm_runtime_resume_and_get(ptdev->base.dev);
        if (ret)
                return ret;

        /* If PM is disabled, we need to call panthor_device_resume() manually. */
        if (!IS_ENABLED(CONFIG_PM)) {
                ret = panthor_device_resume(ptdev->base.dev);
                if (ret)
                        return ret;
        }

        ret = panthor_gpu_init(ptdev);
        if (ret)
                goto err_rpm_put;

        ret = panthor_mmu_init(ptdev);
        if (ret)
                goto err_unplug_gpu;

        ret = panthor_fw_init(ptdev);
        if (ret)
                goto err_unplug_mmu;

        ret = panthor_sched_init(ptdev);
        if (ret)
                goto err_unplug_fw;

        /* ~3 frames */
        pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
        pm_runtime_use_autosuspend(ptdev->base.dev);

        ret = drm_dev_register(&ptdev->base, 0);
        if (ret)
                goto err_disable_autosuspend;

        pm_runtime_put_autosuspend(ptdev->base.dev);
        return 0;

err_disable_autosuspend:
        pm_runtime_dont_use_autosuspend(ptdev->base.dev);
        panthor_sched_unplug(ptdev);

err_unplug_fw:
        panthor_fw_unplug(ptdev);

err_unplug_mmu:
        panthor_mmu_unplug(ptdev);

err_unplug_gpu:
        panthor_gpu_unplug(ptdev);

err_rpm_put:
        pm_runtime_put_sync_suspend(ptdev->base.dev);
        return ret;
}

#define PANTHOR_EXCEPTION(id) \
        [DRM_PANTHOR_EXCEPTION_ ## id] = { \
                .name = #id, \
        }

struct panthor_exception_info {
        const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
        PANTHOR_EXCEPTION(OK),
        PANTHOR_EXCEPTION(TERMINATED),
        PANTHOR_EXCEPTION(KABOOM),
        PANTHOR_EXCEPTION(EUREKA),
        PANTHOR_EXCEPTION(ACTIVE),
        PANTHOR_EXCEPTION(CS_RES_TERM),
        PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
        PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
        PANTHOR_EXCEPTION(CS_BUS_FAULT),
        PANTHOR_EXCEPTION(CS_INSTR_INVALID),
        PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
        PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
        PANTHOR_EXCEPTION(INSTR_INVALID_PC),
        PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
        PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
        PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
        PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
        PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
        PANTHOR_EXCEPTION(IMPRECISE_FAULT),
        PANTHOR_EXCEPTION(OOM),
        PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
        PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
        PANTHOR_EXCEPTION(GPU_BUS_FAULT),
        PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
        PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
        PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
        PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
        PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
        PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
        PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
        PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
        PANTHOR_EXCEPTION(PERM_FAULT_0),
        PANTHOR_EXCEPTION(PERM_FAULT_1),
        PANTHOR_EXCEPTION(PERM_FAULT_2),
        PANTHOR_EXCEPTION(PERM_FAULT_3),
        PANTHOR_EXCEPTION(ACCESS_FLAG_1),
        PANTHOR_EXCEPTION(ACCESS_FLAG_2),
        PANTHOR_EXCEPTION(ACCESS_FLAG_3),
        PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
        PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
        PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
        PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
        PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
        PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
        PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
        PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
        PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

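/**
 * panthor_exception_name() - Convert an exception code to a human readable string.
 * @ptdev: Device.
 * @exception_code: Exception code to convert.
 *
 * Return: The exception name, or "Unknown exception type" if the code is not
 * present in the table above.
 */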
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
        if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
            !panthor_exception_infos[exception_code].name)
                return "Unknown exception type";

        return panthor_exception_infos[exception_code].name;
}

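/* Fault handler for the user MMIO region. When the device is active, the
 * page containing the LATEST_FLUSH register is mapped non-cached; when it is
 * suspended, the dummy "latest flush" page is mapped instead, so userspace
 * never touches a powered-down device.
 */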
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct panthor_device *ptdev = vma->vm_private_data;
        u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long pfn;
        pgprot_t pgprot;
        vm_fault_t ret;
        bool active;
        int cookie;

        if (!drm_dev_enter(&ptdev->base, &cookie))
                return VM_FAULT_SIGBUS;

        mutex_lock(&ptdev->pm.mmio_lock);
        active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

        switch (offset) {
        case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
                if (active)
                        pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
                else
                        pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
                break;

        default:
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        pgprot = vma->vm_page_prot;
        if (active)
                pgprot = pgprot_noncached(pgprot);

        ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
        mutex_unlock(&ptdev->pm.mmio_lock);
        drm_dev_exit(cookie);
        return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
        .fault = panthor_mmio_vm_fault,
};

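/**
 * panthor_device_mmap_io() - mmap the USER MMIO region.
 * @ptdev: Device.
 * @vma: VMA describing the userspace mapping.
 *
 * Only the FLUSH_ID page can be mapped, exactly one page, and never with
 * write or exec permissions. The actual PFN insertion is deferred to the
 * fault handler so the mapping can be switched to the dummy page across
 * suspend/resume.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */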
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
        u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

        switch (offset) {
        case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
                if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
                    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
                        return -EINVAL;

                break;

        default:
                return -EINVAL;
        }

        /* Defer actual mapping to the fault handler. */
        vma->vm_private_data = ptdev;
        vma->vm_ops = &panthor_mmio_vm_ops;
        vm_flags_set(vma,
                     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
                     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
        return 0;
}

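/**
 * panthor_device_resume() - Resume the device.
 * @dev: Device to resume (called by the PM core, or directly when CONFIG_PM
 * is disabled).
 *
 * Re-enables the clocks and devfreq, resumes the GPU sub-blocks, and clears
 * the userspace MMIO mappings pointing to the dummy flush page so the real
 * register page is faulted back in on the next access.
 *
 * Return: 0 on success, a negative error code otherwise.
 */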
int panthor_device_resume(struct device *dev)
{
        struct panthor_device *ptdev = dev_get_drvdata(dev);
        int ret, cookie;

        if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
                return -EINVAL;

        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

        ret = clk_prepare_enable(ptdev->clks.core);
        if (ret)
                goto err_set_suspended;

        ret = clk_prepare_enable(ptdev->clks.stacks);
        if (ret)
                goto err_disable_core_clk;

        ret = clk_prepare_enable(ptdev->clks.coregroup);
        if (ret)
                goto err_disable_stacks_clk;

        ret = panthor_devfreq_resume(ptdev);
        if (ret)
                goto err_disable_coregroup_clk;

        if (panthor_device_is_initialized(ptdev) &&
            drm_dev_enter(&ptdev->base, &cookie)) {
                panthor_gpu_resume(ptdev);
                panthor_mmu_resume(ptdev);
                ret = drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
                if (!ret) {
                        panthor_sched_resume(ptdev);
                } else {
                        panthor_mmu_suspend(ptdev);
                        panthor_gpu_suspend(ptdev);
                }

                drm_dev_exit(cookie);

                if (ret)
                        goto err_suspend_devfreq;
        }

        if (atomic_read(&ptdev->reset.pending))
                queue_work(ptdev->reset.wq, &ptdev->reset.work);

        /* Clear all IOMEM mappings pointing to this device after we've
         * resumed. This way the fake mappings pointing to the dummy flush
         * page are removed and the real IOMEM mapping is restored on the
         * next access.
         */
        mutex_lock(&ptdev->pm.mmio_lock);
        unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
                            DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
        mutex_unlock(&ptdev->pm.mmio_lock);
        return 0;

err_suspend_devfreq:
        panthor_devfreq_suspend(ptdev);

err_disable_coregroup_clk:
        clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
        clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
        clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
        return ret;
}

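/**
 * panthor_device_suspend() - Suspend the device.
 * @dev: Device to suspend (called by the PM core, or directly from the
 * unplug path when CONFIG_PM is disabled).
 *
 * Tears down the userspace MMIO mappings, suspends the scheduler, FW, MMU
 * and GPU blocks, then stops devfreq and the clocks. If devfreq suspend
 * fails, everything is brought back up and the device stays active.
 *
 * Return: 0 on success, a negative error code otherwise.
 */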
int panthor_device_suspend(struct device *dev)
{
        struct panthor_device *ptdev = dev_get_drvdata(dev);
        int ret, cookie;

        if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
                return -EINVAL;

        /* Clear all IOMEM mappings pointing to this device before we
         * shut down the power-domain and clocks. Failing to do that results
         * in external aborts when the process accesses the IOMEM region.
         * We change the state and call unmap_mapping_range() with the
         * mmio_lock held to make sure the vm_fault handler won't set up
         * invalid mappings.
         */
        mutex_lock(&ptdev->pm.mmio_lock);
        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
        unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
                            DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
        mutex_unlock(&ptdev->pm.mmio_lock);

        if (panthor_device_is_initialized(ptdev) &&
            drm_dev_enter(&ptdev->base, &cookie)) {
                cancel_work_sync(&ptdev->reset.work);

                /* We prepare everything as if we were resetting the GPU.
                 * The end of the reset will happen in the resume path though.
                 */
                panthor_sched_suspend(ptdev);
                panthor_fw_suspend(ptdev);
                panthor_mmu_suspend(ptdev);
                panthor_gpu_suspend(ptdev);
                drm_dev_exit(cookie);
        }

        ret = panthor_devfreq_suspend(ptdev);
        if (ret) {
                if (panthor_device_is_initialized(ptdev) &&
                    drm_dev_enter(&ptdev->base, &cookie)) {
                        panthor_gpu_resume(ptdev);
                        panthor_mmu_resume(ptdev);
                        drm_WARN_ON(&ptdev->base, panthor_fw_resume(ptdev));
                        panthor_sched_resume(ptdev);
                        drm_dev_exit(cookie);
                }

                goto err_set_active;
        }

        clk_disable_unprepare(ptdev->clks.coregroup);
        clk_disable_unprepare(ptdev->clks.stacks);
        clk_disable_unprepare(ptdev->clks.core);
        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
        return 0;

err_set_active:
        /* If something failed and we have to revert back to an
         * active state, we also need to clear the MMIO userspace
         * mappings, so any dummy page mappings set up while we
         * were trying to suspend get invalidated.
         */
        mutex_lock(&ptdev->pm.mmio_lock);
        atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
        unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
                            DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
        mutex_unlock(&ptdev->pm.mmio_lock);
        return ret;
}