drivers/cpufreq/qcom-cpufreq-hw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define LUT_MAX_ENTRIES                 40U
#define LUT_SRC                         GENMASK(31, 30)
#define LUT_L_VAL                       GENMASK(7, 0)
#define LUT_CORE_COUNT                  GENMASK(18, 16)
#define LUT_VOLT                        GENMASK(11, 0)
#define CLK_HW_DIV                      2
#define LUT_TURBO_IND                   1

#define GT_IRQ_STATUS                   BIT(2)

#define MAX_FREQ_DOMAINS                4

struct qcom_cpufreq_soc_data {
        u32 reg_enable;
        u32 reg_domain_state;
        u32 reg_dcvs_ctrl;
        u32 reg_freq_lut;
        u32 reg_volt_lut;
        u32 reg_intr_clr;
        u32 reg_current_vote;
        u32 reg_perf_state;
        u8 lut_row_size;
};

struct qcom_cpufreq_data {
        void __iomem *base;

        /*
         * Mutex to synchronize between de-init sequence and re-starting LMh
         * polling/interrupts
         */
        struct mutex throttle_lock;
        int throttle_irq;
        char irq_name[15];
        bool cancel_throttle;
        struct delayed_work throttle_work;
        struct cpufreq_policy *policy;
        struct clk_hw cpu_clk;

        bool per_core_dcvs;
};

static struct {
        struct qcom_cpufreq_data *data;
        const struct qcom_cpufreq_soc_data *soc_data;
} qcom_cpufreq;

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

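/*
 * Vote for the interconnect bandwidth that goes with @freq_khz by looking up
 * the matching OPP and applying it through dev_pm_opp_set_opp().
 */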
static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
                               unsigned long freq_khz)
{
        unsigned long freq_hz = freq_khz * 1000;
        struct dev_pm_opp *opp;
        struct device *dev;
        int ret;

        dev = get_cpu_device(policy->cpu);
        if (!dev)
                return -ENODEV;

        opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        ret = dev_pm_opp_set_opp(dev, opp);
        dev_pm_opp_put(opp);
        return ret;
}

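/*
 * With a DT OPP table present, adjust the voltage of the existing OPP and
 * re-enable it; otherwise dynamically add an OPP built from the LUT entry.
 */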
static int qcom_cpufreq_update_opp(struct device *cpu_dev,
                                   unsigned long freq_khz,
                                   unsigned long volt)
{
        unsigned long freq_hz = freq_khz * 1000;
        int ret;

        /* Skip voltage update if the opp table is not available */
        if (!icc_scaling_enabled)
                return dev_pm_opp_add(cpu_dev, freq_hz, volt);

        ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
        if (ret) {
                dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
                return ret;
        }

        return dev_pm_opp_enable(cpu_dev, freq_hz);
}

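/*
 * Request a new performance state by writing the frequency table index to
 * the domain's perf_state register; with per-core DCVS every CPU in the
 * domain gets its own copy of the register.
 */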
static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
                                        unsigned int index)
{
        struct qcom_cpufreq_data *data = policy->driver_data;
        const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
        unsigned long freq = policy->freq_table[index].frequency;
        unsigned int i;

        writel_relaxed(index, data->base + soc_data->reg_perf_state);

        if (data->per_core_dcvs)
                for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
                        writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

        if (icc_scaling_enabled)
                qcom_cpufreq_set_bw(policy, freq);

        return 0;
}

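/*
 * Read the throttled L_VAL from the hardware (current-vote register on the
 * original block, domain-state register on EPSS) and convert it to Hz using
 * the XO rate.
 */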
static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
        unsigned int lval;

        if (qcom_cpufreq.soc_data->reg_current_vote)
                lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
        else
                lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;

        return lval * xo_rate;
}

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
        struct qcom_cpufreq_data *data;
        const struct qcom_cpufreq_soc_data *soc_data;
        struct cpufreq_policy *policy;
        unsigned int index;

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy)
                return 0;

        data = policy->driver_data;
        soc_data = qcom_cpufreq.soc_data;

        index = readl_relaxed(data->base + soc_data->reg_perf_state);
        index = min(index, LUT_MAX_ENTRIES - 1);

        return policy->freq_table[index].frequency;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
        struct qcom_cpufreq_data *data;
        struct cpufreq_policy *policy;

        policy = cpufreq_cpu_get_raw(cpu);
        if (!policy)
                return 0;

        data = policy->driver_data;

        if (data->throttle_irq >= 0)
                return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

        return qcom_cpufreq_get_freq(cpu);
}

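/* Fast switching path: write the governor's cached resolved index directly */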
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
                                                unsigned int target_freq)
{
        struct qcom_cpufreq_data *data = policy->driver_data;
        const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
        unsigned int index;
        unsigned int i;

        index = policy->cached_resolved_idx;
        writel_relaxed(index, data->base + soc_data->reg_perf_state);

        if (data->per_core_dcvs)
                for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
                        writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

        return policy->freq_table[index].frequency;
}

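/*
 * Walk the hardware lookup table (LUT), create or validate an OPP for each
 * entry and build the cpufreq frequency table; a repeated frequency marks
 * the end of the LUT.
 */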
static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
                                    struct cpufreq_policy *policy)
{
        u32 data, src, lval, i, core_count, prev_freq = 0, freq;
        u32 volt;
        struct cpufreq_frequency_table *table;
        struct dev_pm_opp *opp;
        unsigned long rate;
        int ret;
        struct qcom_cpufreq_data *drv_data = policy->driver_data;
        const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;

        table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        ret = dev_pm_opp_of_add_table(cpu_dev);
        if (!ret) {
                /* Disable all opps and cross-validate against LUT later */
                icc_scaling_enabled = true;
                for (rate = 0; ; rate++) {
                        opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
                        if (IS_ERR(opp))
                                break;

                        dev_pm_opp_put(opp);
                        dev_pm_opp_disable(cpu_dev, rate);
                }
        } else if (ret != -ENODEV) {
                dev_err(cpu_dev, "Invalid opp table in device tree\n");
                kfree(table);
                return ret;
        } else {
                policy->fast_switch_possible = true;
                icc_scaling_enabled = false;
        }

        for (i = 0; i < LUT_MAX_ENTRIES; i++) {
                data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
                                     i * soc_data->lut_row_size);
                src = FIELD_GET(LUT_SRC, data);
                lval = FIELD_GET(LUT_L_VAL, data);
                core_count = FIELD_GET(LUT_CORE_COUNT, data);

                data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
                                     i * soc_data->lut_row_size);
                volt = FIELD_GET(LUT_VOLT, data) * 1000;

                if (src)
                        freq = xo_rate * lval / 1000;
                else
                        freq = cpu_hw_rate / 1000;

                if (freq != prev_freq && core_count != LUT_TURBO_IND) {
                        if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
                                table[i].frequency = freq;
                                dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
                                        freq, core_count);
                        } else {
                                dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
                                table[i].frequency = CPUFREQ_ENTRY_INVALID;
                        }

                } else if (core_count == LUT_TURBO_IND) {
                        table[i].frequency = CPUFREQ_ENTRY_INVALID;
                }

                /*
                 * Two of the same frequencies with the same core counts means
                 * end of table
                 */
                if (i > 0 && prev_freq == freq) {
                        struct cpufreq_frequency_table *prev = &table[i - 1];

                        /*
                         * Only treat the last frequency that might be a boost
                         * as the boost frequency
                         */
                        if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
                                if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
                                        prev->frequency = prev_freq;
                                        prev->flags = CPUFREQ_BOOST_FREQ;
                                } else {
                                        dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
                                                 freq);
                                }
                        }

                        break;
                }

                prev_freq = freq;
        }

        table[i].frequency = CPUFREQ_TABLE_END;
        policy->freq_table = table;
        dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

        return 0;
}

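/* Collect every CPU whose "qcom,freq-domain" phandle points at this domain */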
static void qcom_get_related_cpus(int index, struct cpumask *m)
{
        struct device_node *cpu_np;
        struct of_phandle_args args;
        int cpu, ret;

        for_each_possible_cpu(cpu) {
                cpu_np = of_cpu_device_node_get(cpu);
                if (!cpu_np)
                        continue;

                ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
                                                 "#freq-domain-cells", 0,
                                                 &args);
                of_node_put(cpu_np);
                if (ret < 0)
                        continue;

                if (index == args.args[0])
                        cpumask_set_cpu(cpu, m);
        }
}

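/*
 * Handle an LMh throttle event: report the throttled frequency as HW
 * pressure, then either re-enable the interrupt or keep polling depending
 * on whether the throttle has cleared.
 */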
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
        struct cpufreq_policy *policy = data->policy;
        int cpu = cpumask_first(policy->related_cpus);
        struct device *dev = get_cpu_device(cpu);
        unsigned long freq_hz, throttled_freq;
        struct dev_pm_opp *opp;

        /*
         * Get the h/w throttled frequency, normalize it using the
         * registered opp table and use it to calculate thermal pressure.
         */
        freq_hz = qcom_lmh_get_throttle_freq(data);

        opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
        if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
                opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

        if (IS_ERR(opp)) {
                dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
        } else {
                dev_pm_opp_put(opp);
        }

        throttled_freq = freq_hz / HZ_PER_KHZ;

        /* Update HW pressure (the boost frequencies are accepted) */
        arch_update_hw_pressure(policy->related_cpus, throttled_freq);

        /*
         * In the unlikely case policy is unregistered do not enable
         * polling or h/w interrupt
         */
        mutex_lock(&data->throttle_lock);
        if (data->cancel_throttle)
                goto out;

        /*
         * If h/w throttled frequency is higher than what cpufreq has requested
         * for, then stop polling and switch back to interrupt mechanism.
         */
        if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
                enable_irq(data->throttle_irq);
        else
                mod_delayed_work(system_highpri_wq, &data->throttle_work,
                                 msecs_to_jiffies(10));

out:
        mutex_unlock(&data->throttle_lock);
}

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
        struct qcom_cpufreq_data *data;

        data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
        qcom_lmh_dcvs_notify(data);
}

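/* Throttle interrupt: mask it, switch to polling and clear the status where supported */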
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
        struct qcom_cpufreq_data *c_data = data;

        /* Disable interrupt and enable polling */
        disable_irq_nosync(c_data->throttle_irq);
        schedule_delayed_work(&c_data->throttle_work, 0);

        if (qcom_cpufreq.soc_data->reg_intr_clr)
                writel_relaxed(GT_IRQ_STATUS,
                               c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);

        return IRQ_HANDLED;
}

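/* Register offsets for the original "qcom,cpufreq-hw" block */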
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
        .reg_enable = 0x0,
        .reg_dcvs_ctrl = 0xbc,
        .reg_freq_lut = 0x110,
        .reg_volt_lut = 0x114,
        .reg_current_vote = 0x704,
        .reg_perf_state = 0x920,
        .lut_row_size = 32,
};

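/* Register offsets for the newer "qcom,cpufreq-epss" block */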
static const struct qcom_cpufreq_soc_data epss_soc_data = {
        .reg_enable = 0x0,
        .reg_domain_state = 0x20,
        .reg_dcvs_ctrl = 0xb0,
        .reg_freq_lut = 0x100,
        .reg_volt_lut = 0x200,
        .reg_intr_clr = 0x308,
        .reg_perf_state = 0x320,
        .lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
        { .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
        { .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
        {}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

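/* Set up the optional LMh throttle interrupt for this frequency domain */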
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
        struct qcom_cpufreq_data *data = policy->driver_data;
        struct platform_device *pdev = cpufreq_get_driver_data();
        int ret;

        /*
         * Look for LMh interrupt. If no interrupt line is specified /
         * if there is an error, allow cpufreq to be enabled as usual.
         */
        data->throttle_irq = platform_get_irq_optional(pdev, index);
        if (data->throttle_irq == -ENXIO)
                return 0;
        if (data->throttle_irq < 0)
                return data->throttle_irq;

        data->cancel_throttle = false;
        data->policy = policy;

        mutex_init(&data->throttle_lock);
        INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

        snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
        ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
                                   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
        if (ret) {
                dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
                return 0;
        }

        ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
        if (ret)
                dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
                        data->irq_name, data->throttle_irq);

        return 0;
}

static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
        struct qcom_cpufreq_data *data = policy->driver_data;
        struct platform_device *pdev = cpufreq_get_driver_data();
        int ret;

        if (data->throttle_irq <= 0)
                return 0;

        mutex_lock(&data->throttle_lock);
        data->cancel_throttle = false;
        mutex_unlock(&data->throttle_lock);

        ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
        if (ret)
                dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
                        data->irq_name, data->throttle_irq);

        return ret;
}

static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
        struct qcom_cpufreq_data *data = policy->driver_data;

        if (data->throttle_irq <= 0)
                return 0;

        mutex_lock(&data->throttle_lock);
        data->cancel_throttle = true;
        mutex_unlock(&data->throttle_lock);

        cancel_delayed_work_sync(&data->throttle_work);
        irq_set_affinity_and_hint(data->throttle_irq, NULL);
        disable_irq_nosync(data->throttle_irq);

        return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
        if (data->throttle_irq <= 0)
                return;

        free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
        struct platform_device *pdev = cpufreq_get_driver_data();
        struct device *dev = &pdev->dev;
        struct of_phandle_args args;
        struct device_node *cpu_np;
        struct device *cpu_dev;
        struct qcom_cpufreq_data *data;
        int ret, index;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("%s: failed to get cpu%d device\n", __func__,
                       policy->cpu);
                return -ENODEV;
        }

        cpu_np = of_cpu_device_node_get(policy->cpu);
        if (!cpu_np)
                return -EINVAL;

        ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
                                         "#freq-domain-cells", 0, &args);
        of_node_put(cpu_np);
        if (ret)
                return ret;

        index = args.args[0];
        data = &qcom_cpufreq.data[index];

        /* HW should be in enabled state to proceed */
        if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
                dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
                return -ENODEV;
        }

        if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
                data->per_core_dcvs = true;

        qcom_get_related_cpus(index, policy->cpus);

        policy->driver_data = data;
        policy->dvfs_possible_from_any_cpu = true;

        ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
        if (ret) {
                dev_err(dev, "Domain-%d failed to read LUT\n", index);
                return ret;
        }

        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                dev_err(cpu_dev, "Failed to add OPPs\n");
                return -ENODEV;
        }

        if (policy_has_boost_freq(policy)) {
                ret = cpufreq_enable_boost_support();
                if (ret)
                        dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
        }

        return qcom_cpufreq_hw_lmh_init(policy, index);
}

static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
        struct device *cpu_dev = get_cpu_device(policy->cpu);
        struct qcom_cpufreq_data *data = policy->driver_data;

        dev_pm_opp_remove_all_dynamic(cpu_dev);
        dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
        qcom_cpufreq_hw_lmh_exit(data);
        kfree(policy->freq_table);
        kfree(data);
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
        struct qcom_cpufreq_data *data = policy->driver_data;

        if (data->throttle_irq >= 0)
                enable_irq(data->throttle_irq);
}

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        &cpufreq_freq_attr_scaling_boost_freqs,
        NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                          CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_IS_COOLING_DEV,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = qcom_cpufreq_hw_target_index,
        .get            = qcom_cpufreq_hw_get,
        .init           = qcom_cpufreq_hw_cpu_init,
        .exit           = qcom_cpufreq_hw_cpu_exit,
        .online         = qcom_cpufreq_hw_cpu_online,
        .offline        = qcom_cpufreq_hw_cpu_offline,
        .register_em    = cpufreq_register_em_with_opp,
        .fast_switch    = qcom_cpufreq_hw_fast_switch,
        .name           = "qcom-cpufreq-hw",
        .attr           = qcom_cpufreq_hw_attr,
        .ready          = qcom_cpufreq_ready,
};

static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);

        return qcom_lmh_get_throttle_freq(data);
}

static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
        .recalc_rate = qcom_cpufreq_hw_recalc_rate,
};

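/*
 * Map each frequency domain region, register a read-only CPU clock for it
 * and finally register the cpufreq driver.
 */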
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
        struct clk_hw_onecell_data *clk_data;
        struct device *dev = &pdev->dev;
        struct device *cpu_dev;
        struct clk *clk;
        int ret, i, num_domains;

        clk = clk_get(dev, "xo");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        xo_rate = clk_get_rate(clk);
        clk_put(clk);

        clk = clk_get(dev, "alternate");
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
        clk_put(clk);

        cpufreq_qcom_hw_driver.driver_data = pdev;

        /* Check for optional interconnect paths on CPU0 */
        cpu_dev = get_cpu_device(0);
        if (!cpu_dev)
                return -EPROBE_DEFER;

        ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
        if (ret)
                return dev_err_probe(dev, ret, "Failed to find icc paths\n");

        for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
                if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
                        break;

        qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
                                         GFP_KERNEL);
        if (!qcom_cpufreq.data)
                return -ENOMEM;

        qcom_cpufreq.soc_data = of_device_get_match_data(dev);
        if (!qcom_cpufreq.soc_data)
                return -ENODEV;

        clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
        if (!clk_data)
                return -ENOMEM;

        clk_data->num = num_domains;

        for (i = 0; i < num_domains; i++) {
                struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
                struct clk_init_data clk_init = {};
                void __iomem *base;

                base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(base)) {
                        dev_err(dev, "Failed to map resource index %d\n", i);
                        return PTR_ERR(base);
                }

                data->base = base;

                /* Register CPU clock for each frequency domain */
                clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
                if (!clk_init.name)
                        return -ENOMEM;

                clk_init.flags = CLK_GET_RATE_NOCACHE;
                clk_init.ops = &qcom_cpufreq_hw_clk_ops;
                data->cpu_clk.init = &clk_init;

                ret = devm_clk_hw_register(dev, &data->cpu_clk);
                if (ret < 0) {
                        dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
                        kfree(clk_init.name);
                        return ret;
                }

                clk_data->hws[i] = &data->cpu_clk;
                kfree(clk_init.name);
        }

        ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
        if (ret < 0) {
                dev_err(dev, "Failed to add clock provider\n");
                return ret;
        }

        ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
        if (ret)
                dev_err(dev, "CPUFreq HW driver failed to register\n");
        else
                dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");

        return ret;
}

static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
        .probe = qcom_cpufreq_hw_driver_probe,
        .remove_new = qcom_cpufreq_hw_driver_remove,
        .driver = {
                .name = "qcom-cpufreq-hw",
                .of_match_table = qcom_cpufreq_hw_match,
        },
};

static int __init qcom_cpufreq_hw_init(void)
{
        return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
        platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");