drivers/gpu/drm/msm/msm_kms.c (linux.git, Linux 6.14-rc3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 */

#include <linux/aperture.h>
#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_vblank.h>

#include "disp/msm_disp_snapshot.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
#include "msm_mmu.h"

static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .atomic_check = msm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
};

static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
        .atomic_commit_tail = msm_atomic_commit_tail,
};

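/*
 * Top-level display interrupt handler: the actual handling is delegated
 * to the KMS backend via its irq() callback.
 */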
static irqreturn_t msm_irq(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        return kms->funcs->irq(kms);
}

static void msm_irq_preinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        kms->funcs->irq_preinstall(kms);
}

static int msm_irq_postinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        BUG_ON(!kms);

        if (kms->funcs->irq_postinstall)
                return kms->funcs->irq_postinstall(kms);

        return 0;
}

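/*
 * Install the display interrupt: the backend's irq_preinstall() runs
 * before request_irq() (typically to mask and clear pending interrupts
 * while no handler is registered yet), and irq_postinstall() runs
 * afterwards to enable them.
 */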
static int msm_irq_install(struct drm_device *dev, unsigned int irq)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        int ret;

        if (irq == IRQ_NOTCONNECTED)
                return -ENOTCONN;

        msm_irq_preinstall(dev);

        ret = request_irq(irq, msm_irq, 0, dev->driver->name, dev);
        if (ret)
                return ret;

        kms->irq_requested = true;

        ret = msm_irq_postinstall(dev);
        if (ret) {
                free_irq(irq, dev);
                return ret;
        }

        return 0;
}

static void msm_irq_uninstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        kms->funcs->irq_uninstall(kms);
        if (kms->irq_requested)
                free_irq(kms->irq, dev);
}

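/*
 * Vblank enable/disable requests are not handled inline: they are queued
 * onto priv->wq and run from vblank_ctrl_worker(), since the DRM core may
 * request vblank on/off from atomic context while the KMS callbacks may
 * need to sleep (hence also the GFP_ATOMIC allocation below).
 */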
struct msm_vblank_work {
        struct work_struct work;
        struct drm_crtc *crtc;
        bool enable;
        struct msm_drm_private *priv;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
        struct msm_vblank_work *vbl_work = container_of(work,
                                                struct msm_vblank_work, work);
        struct msm_drm_private *priv = vbl_work->priv;
        struct msm_kms *kms = priv->kms;

        if (vbl_work->enable)
                kms->funcs->enable_vblank(kms, vbl_work->crtc);
        else
                kms->funcs->disable_vblank(kms, vbl_work->crtc);

        kfree(vbl_work);
}

static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
                                  struct drm_crtc *crtc, bool enable)
{
        struct msm_vblank_work *vbl_work;

        vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
        if (!vbl_work)
                return -ENOMEM;

        INIT_WORK(&vbl_work->work, vblank_ctrl_worker);

        vbl_work->crtc = crtc;
        vbl_work->enable = enable;
        vbl_work->priv = priv;

        queue_work(priv->wq, &vbl_work->work);

        return 0;
}

int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        if (!kms)
                return -ENXIO;

        drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

        return vblank_ctrl_queue_work(priv, crtc, true);
}

void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;

        if (!kms)
                return;

        drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);

        vblank_ctrl_queue_work(priv, crtc, false);
}

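/*
 * Set up the GEM address space used for scanout buffers. The IOMMU can
 * hang either off the MDP/DPU device itself or off its parent MDSS
 * device; when no IOMMU is present, NULL is returned and physically
 * contiguous buffers are used instead.
 */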
struct msm_gem_address_space *msm_kms_init_aspace(struct drm_device *dev)
{
        struct msm_gem_address_space *aspace;
        struct msm_mmu *mmu;
        struct device *mdp_dev = dev->dev;
        struct device *mdss_dev = mdp_dev->parent;
        struct device *iommu_dev;

        /*
         * The IOMMU can be a part of the MDSS device tree binding,
         * or of the MDP/DPU device.
         */
        if (device_iommu_mapped(mdp_dev))
                iommu_dev = mdp_dev;
        else
                iommu_dev = mdss_dev;

        mmu = msm_iommu_new(iommu_dev, 0);
        if (IS_ERR(mmu))
                return ERR_CAST(mmu);

        if (!mmu) {
                drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
                return NULL;
        }

        aspace = msm_gem_address_space_create(mmu, "mdp_kms",
                0x1000, 0x100000000 - 0x1000);
        if (IS_ERR(aspace)) {
                dev_err(mdp_dev, "aspace create, error %pe\n", aspace);
                mmu->funcs->destroy(mmu);
        }

        return aspace;
}

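/*
 * Tear down KMS: stop the per-CRTC event workers, stop connector polling,
 * free the snapshot state and the display IRQ, then let the backend
 * destroy itself.
 */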
void msm_drm_kms_uninit(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct msm_drm_private *priv = platform_get_drvdata(pdev);
        struct drm_device *ddev = priv->dev;
        struct msm_kms *kms = priv->kms;
        int i;

        BUG_ON(!kms);

        /* clean up event worker threads */
        for (i = 0; i < priv->num_crtcs; i++) {
                if (priv->event_thread[i].worker)
                        kthread_destroy_worker(priv->event_thread[i].worker);
        }

        drm_kms_helper_poll_fini(ddev);

        msm_disp_snapshot_destroy(ddev);

        pm_runtime_get_sync(dev);
        msm_irq_uninstall(ddev);
        pm_runtime_put_sync(dev);

        if (kms && kms->funcs)
                kms->funcs->destroy(kms);
}

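/*
 * Bring up KMS: evict any conflicting firmware framebuffer driver,
 * initialize the backend (priv->kms_init) and its hardware, then the
 * per-CRTC event workers, vblank support, the display IRQ and the
 * snapshot facility.
 */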
int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
{
        struct msm_drm_private *priv = dev_get_drvdata(dev);
        struct drm_device *ddev = priv->dev;
        struct msm_kms *kms = priv->kms;
        struct drm_crtc *crtc;
        int ret;

        /* the fw fb could be anywhere in memory */
        ret = aperture_remove_all_conflicting_devices(drv->name);
        if (ret)
                return ret;

        ret = priv->kms_init(ddev);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to load kms\n");
                return ret;
        }

        /* Enable normalization of plane zpos */
        ddev->mode_config.normalize_zpos = true;

        ddev->mode_config.funcs = &mode_config_funcs;
        ddev->mode_config.helper_private = &mode_config_helper_funcs;

        kms->dev = ddev;
        ret = kms->funcs->hw_init(kms);
        if (ret) {
                DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
                goto err_msm_uninit;
        }

        drm_helper_move_panel_connectors_to_head(ddev);

        drm_for_each_crtc(crtc, ddev) {
                struct msm_drm_thread *ev_thread;

                /* initialize event thread */
                ev_thread = &priv->event_thread[drm_crtc_index(crtc)];
                ev_thread->dev = ddev;
                ev_thread->worker = kthread_run_worker(0, "crtc_event:%d", crtc->base.id);
                if (IS_ERR(ev_thread->worker)) {
                        ret = PTR_ERR(ev_thread->worker);
                        DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
                        ev_thread->worker = NULL;
                        goto err_msm_uninit;
                }

                sched_set_fifo(ev_thread->worker->task);
        }

        ret = drm_vblank_init(ddev, priv->num_crtcs);
        if (ret < 0) {
                DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
                goto err_msm_uninit;
        }

        pm_runtime_get_sync(dev);
        ret = msm_irq_install(ddev, kms->irq);
        pm_runtime_put_sync(dev);
        if (ret < 0) {
                DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
                goto err_msm_uninit;
        }

        ret = msm_disp_snapshot_init(ddev);
        if (ret)
                DRM_DEV_ERROR(dev, "msm_disp_snapshot_init failed ret = %d\n", ret);

        drm_mode_config_reset(ddev);

        return 0;

err_msm_uninit:
        return ret;
}

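/*
 * PM prepare/complete hooks: suspend and resume the atomic state via the
 * drm_mode_config_helper_* helpers, skipping devices without a KMS
 * instance (e.g. a GPU-only configuration).
 */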
int msm_kms_pm_prepare(struct device *dev)
{
        struct msm_drm_private *priv = dev_get_drvdata(dev);
        struct drm_device *ddev = priv ? priv->dev : NULL;

        if (!priv || !priv->kms)
                return 0;

        return drm_mode_config_helper_suspend(ddev);
}

void msm_kms_pm_complete(struct device *dev)
{
        struct msm_drm_private *priv = dev_get_drvdata(dev);
        struct drm_device *ddev = priv ? priv->dev : NULL;

        if (!priv || !priv->kms)
                return;

        drm_mode_config_helper_resume(ddev);
}

void msm_kms_shutdown(struct platform_device *pdev)
{
        struct msm_drm_private *priv = platform_get_drvdata(pdev);
        struct drm_device *drm = priv ? priv->dev : NULL;

        /*
         * Shut down the hw if we're far enough along that things might be on.
         * If we run this too early, we'll end up panicking in a variety of
         * places. Since we don't register the drm device until late in
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
        if (drm && drm->registered && priv->kms)
                drm_atomic_helper_shutdown(drm);
}