// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define GT_IRQ_STATUS			BIT(2)

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_current_vote;
	u32 reg_intr_clr;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;
	struct resource *res;
	const struct qcom_cpufreq_soc_data *soc_data;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;

	bool per_core_dcvs;
};

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

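/*
 * Find the exact OPP for the requested frequency and apply it, so that the
 * interconnect bandwidth votes tied to that OPP are updated as well.
 */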
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

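/*
 * Request a new performance state by writing the LUT index into the domain's
 * perf-state register; the firmware carries out the actual clock/voltage
 * switch. With per-core DCVS each CPU in the domain has its own register.
 */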
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

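/* Read back the currently programmed LUT index and report its frequency. */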
static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = data->soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

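/*
 * Fast switching writes the cached resolved index directly from scheduler
 * context, without taking any sleeping locks, so no OPP/interconnect work
 * is done here.
 */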
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = data->soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}

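/*
 * Walk the hardware frequency LUT, create or adjust an OPP for each valid
 * row and build the cpufreq frequency table. A repeated frequency marks the
 * end of the table and may flag the previous entry as a boost frequency.
 */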
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table	*table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}

		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two of the same frequencies with the same core counts means
		 * end of table
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

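/*
 * Collect all CPUs whose "qcom,freq-domain" phandle points at this domain.
 * In the device tree each CPU node carries something like (illustrative
 * only, names depend on the platform DT):
 *
 *	cpu@0 {
 *		...
 *		qcom,freq-domain = <&cpufreq_hw 0>;
 *	};
 */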
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}

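/*
 * The throttled L_VAL lives in different registers on OSM (current vote) and
 * EPSS (domain state) hardware; in both cases it encodes a multiple of the
 * XO clock rate.
 */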
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (data->soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}

static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp)) {
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	} else {
		throttled_freq = freq_hz / HZ_PER_KHZ;

		/* Update thermal pressure (the boost frequencies are accepted) */
		arch_update_thermal_pressure(policy->related_cpus, throttled_freq);

		dev_pm_opp_put(opp);
	}

	/*
	 * In the unlikely case policy is unregistered do not enable
	 * polling or h/w interrupt
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

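/* LMh interrupt: hand over to the polling path until throttling ends. */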
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (c_data->soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + c_data->soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}

static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}

static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}

static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}

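/*
 * Map the frequency-domain registers for this policy's CPUs, check that the
 * cpufreq hardware is enabled, build the frequency table and wire up LMh.
 */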
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct resource *res;
	void __iomem *base;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (!res) {
		dev_err(dev, "failed to get mem resource %d\n", index);
		return -ENODEV;
	}

	if (!request_mem_region(res->start, resource_size(res), res->name)) {
		dev_err(dev, "failed to request resource %pR\n", res);
		return -EBUSY;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(dev, "failed to map resource %pR\n", res);
		ret = -ENOMEM;
		goto release_region;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto unmap_base;
	}

	data->soc_data = of_device_get_match_data(&pdev->dev);
	data->base = base;
	data->res = res;

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		ret = -ENODEV;
		goto error;
	}

	if (readl_relaxed(base + data->soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);
	if (cpumask_empty(policy->cpus)) {
		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
		ret = -ENOENT;
		goto error;
	}

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		goto error;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		ret = -ENODEV;
		goto error;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	ret = qcom_cpufreq_hw_lmh_init(policy, index);
	if (ret)
		goto error;

	return 0;
error:
	kfree(data);
unmap_base:
	iounmap(base);
release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.online		= qcom_cpufreq_hw_cpu_online,
	.offline	= qcom_cpufreq_hw_cpu_offline,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};

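/*
 * The "xo" and "alternate" clock rates are needed to decode LUT entries
 * (L_VAL rows are multiples of the XO rate, source-0 rows use the divided
 * alternate clock), so fetch them before registering the cpufreq driver.
 */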
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}

static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");