// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Collabora ltd. */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include <drm/drm_managed.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"

/**
 * struct panthor_devfreq - Device frequency management
 */
struct panthor_devfreq {
	/** @devfreq: devfreq device. */
	struct devfreq *devfreq;

	/** @gov_data: Governor data. */
	struct devfreq_simple_ondemand_data gov_data;

	/** @busy_time: Busy time. */
	ktime_t busy_time;

	/** @idle_time: Idle time. */
	ktime_t idle_time;

	/** @time_last_update: Last update time. */
	ktime_t time_last_update;

	/** @last_busy_state: True if the GPU was busy last time we updated the state. */
	bool last_busy_state;

	/**
	 * @lock: Lock used to protect busy_time, idle_time, time_last_update and
	 * last_busy_state.
	 *
	 * These fields can be accessed concurrently by panthor_devfreq_get_dev_status()
	 * and panthor_devfreq_record_{busy,idle}().
	 */
	spinlock_t lock;
};

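/*
 * Fold the time elapsed since the last update into the busy or idle counter,
 * depending on the state the GPU was in over that period. Must be called with
 * pdevfreq->lock held.
 */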
static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
{
	ktime_t now, last;

	now = ktime_get();
	last = pdevfreq->time_last_update;

	if (pdevfreq->last_busy_state)
		pdevfreq->busy_time += ktime_sub(now, last);
	else
		pdevfreq->idle_time += ktime_sub(now, last);

	pdevfreq->time_last_update = now;
}

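/*
 * devfreq ->target() hook: round the requested frequency to a supported OPP
 * (the rounding direction depends on @flags), then let the OPP layer program
 * the clock and, when one is attached, the regulator voltage.
 */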
static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
				  u32 flags)
{
	struct dev_pm_opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return dev_pm_opp_set_rate(dev, *freq);
}

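/*
 * Restart the utilization accounting window: clear the busy/idle counters and
 * stamp the current time as the new baseline.
 */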
static void panthor_devfreq_reset(struct panthor_devfreq *pdevfreq)
{
	pdevfreq->busy_time = 0;
	pdevfreq->idle_time = 0;
	pdevfreq->time_last_update = ktime_get();
}

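/*
 * devfreq ->get_dev_status() hook: report the busy and total time accumulated
 * since the previous poll, then reset the counters so each polling interval
 * is measured independently.
 */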
static int panthor_devfreq_get_dev_status(struct device *dev,
					  struct devfreq_dev_status *status)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	status->current_frequency = clk_get_rate(ptdev->clks.core);

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);

	status->total_time = ktime_to_ns(ktime_add(pdevfreq->busy_time,
						   pdevfreq->idle_time));

	status->busy_time = ktime_to_ns(pdevfreq->busy_time);

	panthor_devfreq_reset(pdevfreq);

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);

	drm_dbg(&ptdev->base, "busy %lu total %lu %lu %% freq %lu MHz\n",
		status->busy_time, status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

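/*
 * A delayed timer (as opposed to the default deferrable one) is used so that
 * utilization keeps being sampled on schedule even when the CPUs are idle.
 */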
static struct devfreq_dev_profile panthor_devfreq_profile = {
	.timer = DEVFREQ_TIMER_DELAYED,
	.polling_ms = 50, /* ~3 frames */
	.target = panthor_devfreq_target,
	.get_dev_status = panthor_devfreq_get_dev_status,
};

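/**
 * panthor_devfreq_init() - Initialize the panthor devfreq logic
 * @ptdev: Device.
 *
 * Registers the regulators and OPP table with the OPP core, then creates
 * the devfreq device and its cooling device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */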
int panthor_devfreq_init(struct panthor_device *ptdev)
{
	/* There's actually 2 regulators (mali and sram), but the OPP core only
	 * supports one.
	 *
	 * We assume the sram regulator is coupled with the mali one and let
	 * the coupling logic deal with voltage updates.
	 */
	static const char * const reg_names[] = { "mali", NULL };
	struct thermal_cooling_device *cooling;
	struct device *dev = ptdev->base.dev;
	struct panthor_devfreq *pdevfreq;
	struct dev_pm_opp *opp;
	unsigned long cur_freq;
	int ret;

	pdevfreq = drmm_kzalloc(&ptdev->base, sizeof(*ptdev->devfreq), GFP_KERNEL);
	if (!pdevfreq)
		return -ENOMEM;

	ptdev->devfreq = pdevfreq;

	ret = devm_pm_opp_set_regulators(dev, reg_names);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");

		return ret;
	}

	ret = devm_pm_opp_of_add_table(dev);
	if (ret)
		return ret;

	spin_lock_init(&pdevfreq->lock);

	panthor_devfreq_reset(pdevfreq);

	cur_freq = clk_get_rate(ptdev->clks.core);

	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panthor_devfreq_profile.initial_freq = cur_freq;

	/* Regulator coupling only takes care of synchronizing/balancing voltage
	 * updates, but the coupled regulator needs to be enabled manually.
	 *
	 * We use devm_regulator_get_enable_optional() and keep the sram supply
	 * enabled until the device is removed, just like we do for the mali
	 * supply, which is enabled when dev_pm_opp_set_opp(dev, opp) is called,
	 * and disabled when the opp_table is torn down, using the devm action.
	 *
	 * If we really care about disabling regulators on suspend, we should:
	 * - use devm_regulator_get_optional() here
	 * - call dev_pm_opp_set_opp(dev, NULL) before leaving this function
	 *   (this disables the regulator passed to the OPP layer)
	 * - call dev_pm_opp_set_opp(dev, NULL) and
	 *   regulator_disable(ptdev->regulators.sram) in
	 *   panthor_devfreq_suspend()
	 * - call dev_pm_opp_set_opp(dev, default_opp) and
	 *   regulator_enable(ptdev->regulators.sram) in
	 *   panthor_devfreq_resume()
	 *
	 * But without knowing if it's beneficial or not (in terms of power
	 * consumption), or how much it slows down the suspend/resume steps,
	 * let's just keep regulators enabled for the device lifetime.
	 */
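	/*
	 * A minimal sketch of the suspend side of that alternative scheme,
	 * assuming a ptdev->regulators.sram handle obtained with
	 * devm_regulator_get_optional() (neither exists in the current code):
	 *
	 *	int panthor_devfreq_suspend(struct panthor_device *ptdev)
	 *	{
	 *		int ret = devfreq_suspend_device(ptdev->devfreq->devfreq);
	 *
	 *		if (ret)
	 *			return ret;
	 *
	 *		dev_pm_opp_set_opp(ptdev->base.dev, NULL);
	 *		return regulator_disable(ptdev->regulators.sram);
	 *	}
	 */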
	ret = devm_regulator_get_enable_optional(dev, "sram");
	if (ret && ret != -ENODEV) {
		if (ret != -EPROBE_DEFER)
			DRM_DEV_ERROR(dev, "Couldn't retrieve/enable sram supply\n");
		return ret;
	}

	/*
	 * Set the recommended OPP: this will enable and configure the regulator,
	 * if any, and will avoid a switch off by regulator_late_cleanup().
	 */
	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);
	if (ret) {
		DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
		return ret;
	}

	/*
	 * Setup default thresholds for the simple_ondemand governor.
	 * The values are chosen based on experiments.
	 */
	pdevfreq->gov_data.upthreshold = 45;
	pdevfreq->gov_data.downdifferential = 5;
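	/*
	 * With this tuning, simple_ondemand requests the maximum frequency when
	 * utilization over a polling window exceeds upthreshold (45%), holds the
	 * current frequency while it stays above upthreshold - downdifferential
	 * (40%), and below that picks a frequency proportional to the load.
	 */
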
	pdevfreq->devfreq = devm_devfreq_add_device(dev, &panthor_devfreq_profile,
						    DEVFREQ_GOV_SIMPLE_ONDEMAND,
						    &pdevfreq->gov_data);
	if (IS_ERR(pdevfreq->devfreq)) {
		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
		ret = PTR_ERR(pdevfreq->devfreq);
		pdevfreq->devfreq = NULL;
		return ret;
	}

	cooling = devfreq_cooling_em_register(pdevfreq->devfreq, NULL);
	if (IS_ERR(cooling))
		DRM_DEV_INFO(dev, "Failed to register cooling device\n");

	return 0;
}

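/**
 * panthor_devfreq_resume() - Resume devfreq sampling
 * @ptdev: Device.
 *
 * The utilization counters are reset first, so that stale numbers accumulated
 * before suspend don't skew the first polling window after resume.
 *
 * Return: 0 on success, a negative error code otherwise.
 */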
int panthor_devfreq_resume(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;

	if (!pdevfreq->devfreq)
		return 0;

	panthor_devfreq_reset(pdevfreq);

	return devfreq_resume_device(pdevfreq->devfreq);
}

int panthor_devfreq_suspend(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;

	if (!pdevfreq->devfreq)
		return 0;

	return devfreq_suspend_device(pdevfreq->devfreq);
}

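/*
 * The two helpers below are called by the rest of the driver to report GPU
 * busy/idle transitions. Each closes the current accounting window under the
 * lock before flipping last_busy_state, so no time gets attributed to the
 * wrong state.
 */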
void panthor_devfreq_record_busy(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	if (!pdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);
	pdevfreq->last_busy_state = true;

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
}

void panthor_devfreq_record_idle(struct panthor_device *ptdev)
{
	struct panthor_devfreq *pdevfreq = ptdev->devfreq;
	unsigned long irqflags;

	if (!pdevfreq->devfreq)
		return;

	spin_lock_irqsave(&pdevfreq->lock, irqflags);

	panthor_devfreq_update_utilization(pdevfreq);
	pdevfreq->last_busy_state = false;

	spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
}