drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <drm/drm_vblank.h>
#include <drm/drm_atomic_helper.h>

#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"

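/*
 * amdgpu_dm_crtc_handle_vblank - forward a hardware vblank to DRM core.
 *
 * Called from the DM vblank interrupt path. Besides notifying DRM, it
 * completes any pending event for commits that did not submit a page flip
 * (e.g. cursor-only updates), which would otherwise never be signalled from
 * the pageflip interrupt handler.
 */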
void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
        struct drm_crtc *crtc = &acrtc->base;
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        drm_crtc_handle_vblank(crtc);

        spin_lock_irqsave(&dev->event_lock, flags);

        /* Send completion event for cursor-only commits */
        if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
                drm_crtc_send_vblank_event(crtc, acrtc->event);
                drm_crtc_vblank_put(crtc);
                acrtc->event = NULL;
        }

        spin_unlock_irqrestore(&dev->event_lock, flags);
}

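/*
 * Returns true when the atomic state requires a full modeset of this CRTC.
 * The stream arguments are currently unused; the decision is based solely on
 * the CRTC being active and DRM flagging a modeset.
 */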
bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
                             struct dc_stream_state *new_stream,
                             struct dc_stream_state *old_stream)
{
        return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

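/*
 * VRR counts as active in both the variable and the fixed FreeSync states.
 * This variant reads the copy kept in the CRTC's IRQ parameters and is the
 * one used from the interrupt handlers.
 */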
bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
        return acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_VARIABLE ||
               acrtc->dm_irq_params.freesync_config.state ==
                       VRR_STATE_ACTIVE_FIXED;
}

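/*
 * Enable or disable the VUPDATE interrupt for the OTG driving this CRTC.
 * Returns 0 when the CRTC has no OTG instance assigned, and -EBUSY when DC
 * rejects the interrupt state change.
 */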
int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
        enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        int rc;

        if (acrtc->otg_inst == -1)
                return 0;

        irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

        DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
                      acrtc->crtc_id, enable ? "en" : "dis", rc);
        return rc;
}

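/*
 * Same check as amdgpu_dm_crtc_vrr_active_irq(), but against the FreeSync
 * configuration stored in the atomic CRTC state instead of the IRQ params.
 */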
bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state)
{
        return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
               dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

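/*
 * Deferred work queued from dm_set_vblank(). Under dc_lock it maintains the
 * count of CRTCs with vblank interrupts enabled, allows idle optimizations
 * (MALL) only when that count drops to zero, and enables or disables
 * PSR/Panel Replay to match the OS vblank requirements.
 */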
static void vblank_control_worker(struct work_struct *work)
{
        struct vblank_control_work *vblank_work =
                container_of(work, struct vblank_control_work, work);
        struct amdgpu_display_manager *dm = vblank_work->dm;

        mutex_lock(&dm->dc_lock);

        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
        else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;

        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);

        DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

        /*
         * Control PSR based on vblank requirements from the OS.
         *
         * If the panel supports PSR SU, there is no need to disable PSR when
         * the OS is submitting fast atomic commits (we infer this by whether
         * the OS requests vblank events). Fast atomic commits will simply
         * trigger a full-frame update (FFU), a special case of selective
         * update (SU) where the SU region is the full hactive*vactive region.
         * See fill_dc_dirty_rects().
         */
        if (vblank_work->stream && vblank_work->stream->link) {
                /* Prioritize Panel Replay over PSR */
                if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
                        amdgpu_dm_replay_enable(vblank_work->stream, false);
                else if (vblank_work->enable) {
                        if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
                            vblank_work->stream->link->psr_settings.psr_allow_active)
                                amdgpu_dm_psr_disable(vblank_work->stream);
                } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
                           !vblank_work->stream->link->psr_settings.psr_allow_active &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
                           !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
                           vblank_work->stream->link->panel_config.psr.disallow_replay &&
                           vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
                        amdgpu_dm_psr_enable(vblank_work->stream);
                }
        }

        mutex_unlock(&dm->dc_lock);

        dc_stream_release(vblank_work->stream);

        kfree(vblank_work);
}

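/*
 * Common implementation behind the enable_vblank/disable_vblank hooks. The
 * VUPDATE interrupt is kept in sync with vblank (it is only needed while VRR
 * is active). The PSR/idle-optimization side effects are deferred to a
 * worker, presumably because dc_lock is a mutex while this path may run in
 * atomic context (note the GFP_ATOMIC allocation below).
 */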
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
        struct amdgpu_display_manager *dm = &adev->dm;
        struct vblank_control_work *work;
        int rc = 0;

        if (acrtc->otg_inst == -1)
                goto skip;

        if (enable) {
                /* vblank irq on -> Only need vupdate irq in vrr mode */
                if (amdgpu_dm_crtc_vrr_active(acrtc_state))
                        rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
        } else {
                /* vblank irq off -> vupdate irq off */
                rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
        }

        if (rc)
                return rc;

        rc = (enable)
                ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
                : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);

        if (rc)
                return rc;

skip:
        if (amdgpu_in_reset(adev))
                return 0;

        if (dm->vblank_control_workqueue) {
                work = kzalloc(sizeof(*work), GFP_ATOMIC);
                if (!work)
                        return -ENOMEM;

                INIT_WORK(&work->work, vblank_control_worker);
                work->dm = dm;
                work->acrtc = acrtc;
                work->enable = enable;

                if (acrtc_state->stream) {
                        dc_stream_retain(acrtc_state->stream);
                        work->stream = acrtc_state->stream;
                }

                queue_work(dm->vblank_control_workqueue, &work->work);
        }

        return 0;
}

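/*
 * DRM vblank hooks: DRM core calls enable_vblank when the CRTC's vblank
 * reference count becomes non-zero and disable_vblank once it drops back to
 * zero (possibly deferred by the vblank off-delay).
 */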
int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
{
        return dm_set_vblank(crtc, true);
}

void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
{
        dm_set_vblank(crtc, false);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
                                  struct drm_crtc_state *state)
{
        struct dm_crtc_state *cur = to_dm_crtc_state(state);

        /* TODO Destroy dc_stream objects once the stream object is flattened */
        if (cur->stream)
                dc_stream_release(cur->stream);

        __drm_atomic_helper_crtc_destroy_state(state);

        kfree(state);
}

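/*
 * Duplicate the software CRTC state for a new atomic commit. The dc_stream
 * is shared with the current state, so an extra reference is taken on it;
 * the remaining DM-specific fields are copied by value.
 */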
static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct dm_crtc_state *state, *cur;

        if (WARN_ON(!crtc->state))
                return NULL;

        cur = to_dm_crtc_state(crtc->state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

        if (cur->stream) {
                state->stream = cur->stream;
                dc_stream_retain(state->stream);
        }

        state->active_planes = cur->active_planes;
        state->vrr_infopacket = cur->vrr_infopacket;
        state->abm_level = cur->abm_level;
        state->vrr_supported = cur->vrr_supported;
        state->freesync_config = cur->freesync_config;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
        state->crc_skip_count = cur->crc_skip_count;
        state->mpo_requested = cur->mpo_requested;
        /* TODO Duplicate dc_stream once the stream object is flattened */

        return &state->base;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}

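/*
 * The drm_crtc_funcs.reset hook: free any existing software state and
 * install a freshly zeroed dm_crtc_state.
 */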
static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
        struct dm_crtc_state *state;

        if (crtc->state)
                dm_crtc_destroy_state(crtc, crtc->state);

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (WARN_ON(!state))
                return;

        __drm_atomic_helper_crtc_reset(crtc, &state->base);
}

#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
        crtc_debugfs_init(crtc);

        return 0;
}
#endif

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .reset = dm_crtc_reset_state,
        .destroy = amdgpu_dm_crtc_destroy,
        .set_config = drm_atomic_helper_set_config,
        .page_flip = drm_atomic_helper_page_flip,
        .atomic_duplicate_state = dm_crtc_duplicate_state,
        .atomic_destroy_state = dm_crtc_destroy_state,
        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
        .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = amdgpu_dm_crtc_enable_vblank,
        .disable_vblank = amdgpu_dm_crtc_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
        .late_register = amdgpu_dm_crtc_late_register,
#endif
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

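/*
 * Count the non-cursor planes that end up enabled on this CRTC in the new
 * atomic state. A plane only counts if it has a framebuffer; planes not
 * touched by the commit are assumed to remain enabled as they were.
 */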
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
        struct drm_atomic_state *state = new_crtc_state->state;
        struct drm_plane *plane;
        int num_active = 0;

        drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
                struct drm_plane_state *new_plane_state;

                /* Cursor planes are "fake". */
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        continue;

                new_plane_state = drm_atomic_get_new_plane_state(state, plane);

                if (!new_plane_state) {
                        /*
                         * The plane is enabled on the CRTC and hasn't changed
                         * state. This means that it previously passed
                         * validation and is therefore enabled.
                         */
                        num_active += 1;
                        continue;
                }

                /* We need a framebuffer to be considered enabled. */
                num_active += (new_plane_state->fb != NULL);
        }

        return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
                                         struct drm_crtc_state *new_crtc_state)
{
        struct dm_crtc_state *dm_new_crtc_state =
                to_dm_crtc_state(new_crtc_state);

        dm_new_crtc_state->active_planes = 0;

        if (!dm_new_crtc_state->stream)
                return;

        dm_new_crtc_state->active_planes =
                count_crtc_active_planes(new_crtc_state);
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        return true;
}

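/*
 * Atomic check for the CRTC: cache the active plane count, reject
 * configurations the hardware cannot handle (a CRTC enabled without its
 * primary plane, async flips for non-fast updates) and finally have DC
 * validate the stream.
 */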
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                                      struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
                                                                          crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        int ret = -EINVAL;

        trace_amdgpu_dm_crtc_atomic_check(crtc_state);

        dm_update_crtc_active_planes(crtc, crtc_state);

        if (WARN_ON(unlikely(!dm_crtc_state->stream &&
                        amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
                return ret;
        }

        /*
         * We require the primary plane to be enabled whenever the CRTC is, otherwise
         * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
         * planes are disabled, which is not supported by the hardware. And there is legacy
         * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
        if (crtc_state->enable &&
                !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
                DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
                return -EINVAL;
        }

        /*
         * Only allow async flips for fast updates that don't change the FB
         * pitch, the DCC state, rotation, etc.
         */
        if (crtc_state->async_flip &&
            dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
                drm_dbg_atomic(crtc->dev,
                               "[CRTC:%d:%s] async flips are only supported for fast updates\n",
                               crtc->base.id, crtc->name);
                return -EINVAL;
        }

        /* In some use cases, like reset, no stream is attached */
        if (!dm_crtc_state->stream)
                return 0;

        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;

        DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
        return ret;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup,
        .get_scanout_position = amdgpu_crtc_get_scanout_position,
};

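/*
 * Create and register one DM CRTC together with its cursor plane. @plane is
 * the primary plane created for this pipe; the DM setup code (e.g.
 * amdgpu_dm_initialize_drm_device()) is expected to call this once per CRTC
 * index during driver initialization.
 */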
int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t crtc_index)
{
        struct amdgpu_crtc *acrtc = NULL;
        struct drm_plane *cursor_plane;
        bool is_dcn;
        int res = -ENOMEM;

        cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
        if (!cursor_plane)
                goto fail;

        cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
        res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
        if (res)
                goto fail;

        acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
        if (!acrtc)
                goto fail;

        res = drm_crtc_init_with_planes(
                        dm->ddev,
                        &acrtc->base,
                        plane,
                        cursor_plane,
                        &amdgpu_dm_crtc_funcs, NULL);

        if (res)
                goto fail;

        drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

        /* Create (reset) the plane state */
        if (acrtc->base.funcs->reset)
                acrtc->base.funcs->reset(&acrtc->base);

        acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
        acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

        acrtc->crtc_id = crtc_index;
        acrtc->base.enabled = false;
        acrtc->otg_inst = -1;

        dm->adev->mode_info.crtcs[crtc_index] = acrtc;

        /* Don't enable DRM CRTC degamma property for DCE since it doesn't
         * support programmable degamma anywhere.
         */
        is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
        drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
                                   true, MAX_COLOR_LUT_ENTRIES);

        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

        return 0;

fail:
        kfree(acrtc);
        kfree(cursor_plane);
        return res;
}