// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

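/*
 * Hrtimer callback that stands in for the hardware vblank interrupt. It
 * forwards the timer by one frame period, hands the vblank event to the
 * DRM vblank core and, when the composer is enabled, queues the composer
 * worker that computes the CRC for the current frame.
 */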
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
        struct vkms_output *output = container_of(timer, struct vkms_output,
                                                  vblank_hrtimer);
        struct drm_crtc *crtc = &output->crtc;
        struct vkms_crtc_state *state;
        u64 ret_overrun;
        bool ret, fence_cookie;

        fence_cookie = dma_fence_begin_signalling();

        ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
                                          output->period_ns);
        if (ret_overrun != 1)
                pr_warn("%s: vblank timer overrun\n", __func__);

        spin_lock(&output->lock);
        ret = drm_crtc_handle_vblank(crtc);
        if (!ret)
                DRM_ERROR("vkms failure on handling vblank\n");

        state = output->composer_state;
        spin_unlock(&output->lock);

        if (state && output->composer_enabled) {
                u64 frame = drm_crtc_accurate_vblank_count(crtc);

                /*
                 * Update frame_start only if a queued vkms_composer_worker()
                 * has read the data.
                 */
                spin_lock(&output->composer_lock);
                if (!state->crc_pending)
                        state->frame_start = frame;
                else
                        DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
                                         state->frame_start, frame);
                state->frame_end = frame;
                state->crc_pending = true;
                spin_unlock(&output->composer_lock);

                ret = queue_work(output->composer_workq, &state->composer_work);
                if (!ret)
                        DRM_DEBUG_DRIVER("Composer worker already queued\n");
        }

        dma_fence_end_signalling(fence_cookie);

        return HRTIMER_RESTART;
}

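/*
 * Called by the vblank core when vblank interrupts are to be enabled:
 * arm a relative hrtimer with the current mode's frame duration as its
 * period so that vkms_vblank_simulate() fires once per frame.
 */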
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

        hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        out->vblank_hrtimer.function = &vkms_vblank_simulate;
        out->period_ns = ktime_set(0, vblank->framedur_ns);
        hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

        return 0;
}

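/* Stop the simulated vblank by cancelling the hrtimer. */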
static void vkms_disable_vblank(struct drm_crtc *crtc)
{
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

        hrtimer_cancel(&out->vblank_hrtimer);
}

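/*
 * Report the timestamp of the current or most recent vblank. The simulated
 * scanout is driven entirely by the hrtimer, so the timestamp is derived
 * from the timer's expiry time rather than from a hardware register.
 */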
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
                                      int *max_error, ktime_t *vblank_time,
                                      bool in_vblank_irq)
{
        struct drm_device *dev = crtc->dev;
        struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
        struct vkms_output *output = &vkmsdev->output;
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

        if (!READ_ONCE(vblank->enabled)) {
                *vblank_time = ktime_get();
                return true;
        }

        *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

        if (WARN_ON(*vblank_time == vblank->time))
                return true;

        /*
         * To prevent races we roll the hrtimer forward before we do any
         * interrupt processing - this is how real hw works (the interrupt is
         * only generated after all the vblank registers are updated) and what
         * the vblank core expects. Therefore we need to always correct the
         * timestamp by one frame.
         */
        *vblank_time -= output->period_ns;

        return true;
}

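/*
 * Duplicate the current CRTC state for an atomic commit. The embedded
 * composer work item is reinitialized because the helper copies the old
 * state wholesale and a work_struct must not be shared between states.
 */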
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct vkms_crtc_state *vkms_state;

        if (WARN_ON(!crtc->state))
                return NULL;

        vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
        if (!vkms_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

        INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

        return &vkms_state->base;
}

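/*
 * Free a CRTC state. The composer worker embedded in the state must no
 * longer be pending at this point, hence the WARN_ON() below.
 */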
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
                                           struct drm_crtc_state *state)
{
        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

        __drm_atomic_helper_crtc_destroy_state(state);

        WARN_ON(work_pending(&vkms_state->composer_work));
        kfree(vkms_state->active_planes);
        kfree(vkms_state);
}

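/*
 * Reset the CRTC to a freshly allocated blank state. If the allocation
 * fails, &vkms_state->base evaluates to NULL (base is the first member)
 * and __drm_atomic_helper_crtc_reset() accepts a NULL state, so the CRTC
 * simply ends up with no state.
 */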
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
        struct vkms_crtc_state *vkms_state =
                kzalloc(sizeof(*vkms_state), GFP_KERNEL);

        if (crtc->state)
                vkms_atomic_crtc_destroy_state(crtc, crtc->state);

        __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
        if (vkms_state)
                INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
        .set_config             = drm_atomic_helper_set_config,
        .page_flip              = drm_atomic_helper_page_flip,
        .reset                  = vkms_atomic_crtc_reset,
        .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
        .atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
        .enable_vblank          = vkms_enable_vblank,
        .disable_vblank         = vkms_disable_vblank,
        .get_vblank_timestamp   = vkms_get_vblank_timestamp,
        .get_crc_sources        = vkms_get_crc_sources,
        .set_crc_source         = vkms_set_crc_source,
        .verify_crc_source      = vkms_verify_crc_source,
};

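/*
 * Validate the new CRTC state and cache the visible planes: a first pass
 * counts them so that active_planes can be sized exactly, a second pass
 * fills the array. The composer worker later walks this array instead of
 * the full plane list.
 */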
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
                                  struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
                                                                          crtc);
        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        int i = 0, ret;

        if (vkms_state->active_planes)
                return 0;

        ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
        if (ret < 0)
                return ret;

        drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
                plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
                WARN_ON(!plane_state);

                if (!plane_state->visible)
                        continue;

                i++;
        }

        vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
        if (!vkms_state->active_planes)
                return -ENOMEM;
        vkms_state->num_active_planes = i;

        i = 0;
        drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
                plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);

                if (!plane_state->visible)
                        continue;

                vkms_state->active_planes[i++] =
                        to_vkms_plane_state(plane_state);
        }

        return 0;
}

static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_atomic_state *state)
{
        drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_atomic_state *state)
{
        drm_crtc_vblank_off(crtc);
}

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_atomic_state *state)
        __acquires(&vkms_output->lock)
{
        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

        /*
         * This lock is held across the atomic commit to block the vblank
         * timer from scheduling vkms_composer_worker() until the composer
         * state is updated.
         */
        spin_lock_irq(&vkms_output->lock);
}

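/*
 * Complete the commit: send or arm the pending pageflip event, publish
 * the new state to the vblank timer through composer_state, and release
 * the lock taken in vkms_crtc_atomic_begin().
 */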
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                                   struct drm_atomic_state *state)
        __releases(&vkms_output->lock)
{
        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

        if (crtc->state->event) {
                spin_lock(&crtc->dev->event_lock);

                if (drm_crtc_vblank_get(crtc) != 0)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                else
                        drm_crtc_arm_vblank_event(crtc, crtc->state->event);

                spin_unlock(&crtc->dev->event_lock);

                crtc->state->event = NULL;
        }

        vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

        spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
        .atomic_check   = vkms_crtc_atomic_check,
        .atomic_begin   = vkms_crtc_atomic_begin,
        .atomic_flush   = vkms_crtc_atomic_flush,
        .atomic_enable  = vkms_crtc_atomic_enable,
        .atomic_disable = vkms_crtc_atomic_disable,
};

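/*
 * Initialize the CRTC with its primary and cursor planes, register the
 * atomic helpers, enable color management and create the ordered
 * workqueue on which the composer work is queued.
 */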
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                   struct drm_plane *primary, struct drm_plane *cursor)
{
        struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
        int ret;

        ret = drmm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                         &vkms_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to init CRTC\n");
                return ret;
        }

        drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

        ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
        if (ret) {
                DRM_ERROR("Failed to set gamma size\n");
                return ret;
        }

        drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);

        spin_lock_init(&vkms_out->lock);
        spin_lock_init(&vkms_out->composer_lock);

        vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
        if (!vkms_out->composer_workq)
                return -ENOMEM;

        return ret;
}