1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
11 #include <linux/clk.h>
12 #include <linux/regulator/consumer.h>
18 /* As there are different display controller blocks depending on the
19 * snapdragon version, the kms support is split out and the appropriate
20 * implementation is loaded at runtime. The kms module is responsible
21 * for constructing the appropriate planes/crtcs/encoders/connectors.
23 struct msm_kms_funcs {
24 /* hw initialization: */
25 int (*hw_init)(struct msm_kms *kms);
/* irq handling: */
27 void (*irq_preinstall)(struct msm_kms *kms);
28 int (*irq_postinstall)(struct msm_kms *kms);
29 void (*irq_uninstall)(struct msm_kms *kms);
30 irqreturn_t (*irq)(struct msm_kms *kms);
31 int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
32 void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
35 * Atomic commit handling:
37 * Note that in the case of async commits, the funcs which take
38 * a crtc_mask (ie. ->flush_commit(), and ->complete_commit())
39 * might not be evenly balanced with ->prepare_commit(), however
40 * each crtc that is affected by a ->prepare_commit() (potentially
41 * multiple times) will eventually (at end of vsync period) be
42 * flushed and completed.
44 * This has some implications about tracking of cleanup state,
45 * for example SMP blocks to release after commit completes. Ie.
46 * cleanup state should be also duplicated in the various
47 * duplicate_state() methods, as the current cleanup state at
48 * ->complete_commit() time may have accumulated cleanup work
49 * from multiple commits.
53 * Enable/disable power/clks needed for hw access done in other
54 * commit related methods.
56 * If mdp4 is migrated to runpm, we could probably drop these
57 * and use runpm directly.
59 void (*enable_commit)(struct msm_kms *kms);
60 void (*disable_commit)(struct msm_kms *kms);
63 * Prepare for atomic commit. This is called after any previous
64 * (async or otherwise) commit has completed.
66 void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
69 * Flush an atomic commit. This is called after the hardware
70 * updates have already been pushed down to affected planes/
71 * crtcs/encoders/connectors.
73 void (*flush_commit)(struct msm_kms *kms, unsigned crtc_mask);
76 * Wait for any in-progress flush to complete on the specified
77 * crtcs. This should not block if there is no in-progress
78 * commit (ie. don't just wait for a vblank), as it will also
79 * be called before ->prepare_commit() to ensure any potential
80 * "async" commit has completed.
82 void (*wait_flush)(struct msm_kms *kms, unsigned crtc_mask);
85 * Clean up after commit is completed. This is called after
86 * ->wait_flush(), to give the backend a chance to do any
87 * post-commit cleanup.
89 void (*complete_commit)(struct msm_kms *kms, unsigned crtc_mask);
/* misc (encoder clock constraint): */
96 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
97 struct drm_encoder *encoder);
/* cleanup: */
99 void (*destroy)(struct msm_kms *kms);
/* display snapshot support: */
102 void (*snapshot)(struct msm_disp_state *disp_state, struct msm_kms *kms);
/* debugfs: */
104 #ifdef CONFIG_DEBUG_FS
106 int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
113 * A per-crtc timer for pending async atomic flushes. Scheduled to expire
114 * shortly before vblank to flush pending async updates.
116 struct msm_pending_timer {
/* hrtimer-driven work item that performs the deferred flush */
117 struct msm_hrtimer_work work;
/* kthread worker the flush work is queued on */
118 struct kthread_worker *worker;
/* backend vtable (mdp4/mdp5/dpu implementation) */
124 const struct msm_kms_funcs *funcs;
/* back-pointer to the drm device this kms instance drives */
125 struct drm_device *dev;
127 /* irq number to be passed on to msm_irq_install */
131 /* mapper-id used to request GEM buffer mapped for scanout: */
132 struct msm_gem_address_space *aspace;
134 /* disp snapshot support */
135 struct kthread_worker *dump_worker;
136 struct kthread_work dump_work;
137 struct mutex dump_mutex;
140 * For async commit, where ->flush_commit() and later happens
141 * from the crtc's pending_timer close to end of the frame:
143 struct mutex commit_lock[MAX_CRTCS];
/* bitmask of crtcs with a commit pending flush */
144 unsigned pending_crtc_mask;
/* one async-flush timer per crtc */
145 struct msm_pending_timer pending_timers[MAX_CRTCS];
/*
 * Initialize the common msm_kms state: per-crtc commit locks and the
 * per-crtc pending-flush timers.  Returns 0 on success or a negative
 * errno (presumably from msm_atomic_init_pending_timer() — the error
 * path is outside this view; TODO confirm).
 */
148 static inline int msm_kms_init(struct msm_kms *kms,
149 const struct msm_kms_funcs *funcs)
/* one commit_lock per possible crtc */
153 for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
154 mutex_init(&kms->commit_lock[i]);
/* set up the async-flush timer for each crtc index */
158 for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
159 ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
/*
 * Tear down the common msm_kms state created by msm_kms_init():
 * destroys each per-crtc pending-flush timer.
 */
168 static inline void msm_kms_destroy(struct msm_kms *kms)
172 for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
173 msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
/* Iterate the device's crtcs, visiting only those whose bit is set in crtc_mask. */
176 #define for_each_crtc_mask(dev, crtc, crtc_mask) \
177 drm_for_each_crtc(crtc, dev) \
178 for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
/* Same as for_each_crtc_mask(), but walks the crtc list in reverse order. */
180 #define for_each_crtc_mask_reverse(dev, crtc, crtc_mask) \
181 drm_for_each_crtc_reverse(crtc, dev) \
182 for_each_if (drm_crtc_mask(crtc) & (crtc_mask))
/* Bind/unbind the kms side of the msm drm driver (defined elsewhere). */
184 int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv);
185 void msm_drm_kms_uninit(struct device *dev);
187 #endif /* __MSM_KMS_H__ */