drivers/clk/clk-scmi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

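/*
 * Values for the 'atomic' argument taken by the SCMI clock protocol
 * enable/disable/state_get operations used below.
 */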
#define NOT_ATOMIC      false
#define ATOMIC          true

enum scmi_clk_feats {
        SCMI_CLK_ATOMIC_SUPPORTED,
        SCMI_CLK_STATE_CTRL_SUPPORTED,
        SCMI_CLK_RATE_CTRL_SUPPORTED,
        SCMI_CLK_PARENT_CTRL_SUPPORTED,
        SCMI_CLK_DUTY_CYCLE_SUPPORTED,
        SCMI_CLK_FEATS_COUNT
};

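/*
 * A clock's feats_key is built by OR-ing together BIT()s of the features
 * above, so at most 2^SCMI_CLK_FEATS_COUNT distinct clk_ops sets can exist.
 */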
#define SCMI_MAX_CLK_OPS        BIT(SCMI_CLK_FEATS_COUNT)

static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

struct scmi_clk {
        u32 id;
        struct device *dev;
        struct clk_hw hw;
        const struct scmi_clock_info *info;
        const struct scmi_protocol_handle *ph;
        struct clk_parent_data *parent_data;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)

static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
        int ret;
        u64 rate;
        struct scmi_clk *clk = to_scmi_clk(hw);

        ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
        if (ret)
                return 0;
        return rate;
}

static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long *parent_rate)
{
        u64 fmin, fmax, ftmp;
        struct scmi_clk *clk = to_scmi_clk(hw);

        /*
         * We can't figure out what rate it will be, so just return the
         * rate back to the caller. scmi_clk_recalc_rate() will be called
         * after the rate is set and we'll know what rate the clock is
         * running at then.
         */
        if (clk->info->rate_discrete)
                return rate;

        fmin = clk->info->range.min_rate;
        fmax = clk->info->range.max_rate;
        if (rate <= fmin)
                return fmin;
        else if (rate >= fmax)
                return fmax;

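        /*
         * Round the request up to the next step boundary: e.g. with
         * min_rate = 100 MHz and step_size = 25 MHz, a request for
         * 110 MHz is rounded up to 125 MHz.
         */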
        ftmp = rate - fmin;
        ftmp += clk->info->range.step_size - 1; /* to round up */
        do_div(ftmp, clk->info->range.step_size);

        return ftmp * clk->info->range.step_size + fmin;
}

static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}

static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
}

static u8 scmi_clk_get_parent(struct clk_hw *hw)
{
        struct scmi_clk *clk = to_scmi_clk(hw);
        u32 parent_id, p_idx;
        int ret;

        ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
        if (ret)
                return 0;

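        /*
         * Map the SCMI parent clock ID back to its position in
         * parent_data[]; .get_parent() cannot report errors, so fall
         * back to index 0 when no match is found.
         */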
        for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
                if (clk->parent_data[p_idx].index == parent_id)
                        break;
        }

        if (p_idx == clk->info->num_parents)
                return 0;

        return p_idx;
}

static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        /*
         * Assume all the requested rates are supported and let the
         * firmware handle the rest of the work.
         */
        return 0;
}

static int scmi_clk_enable(struct clk_hw *hw)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
}

static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
}

static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
        struct scmi_clk *clk = to_scmi_clk(hw);

        scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}

static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
{
        int ret;
        bool enabled = false;
        struct scmi_clk *clk = to_scmi_clk(hw);

        ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, ATOMIC);
        if (ret)
                dev_warn(clk->dev,
                         "Failed to get state for clock ID %d\n", clk->id);

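        /* On failure @enabled stays false, so the clock reads as disabled. */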
        return !!enabled;
}

static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
        int ret;
        u32 val;
        struct scmi_clk *clk = to_scmi_clk(hw);

        ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id,
                                                 SCMI_CLOCK_CFG_DUTY_CYCLE,
                                                 &val, NULL, false);
        if (!ret) {
                duty->num = val;
                duty->den = 100;
        } else {
                dev_warn(clk->dev,
                         "Failed to get duty cycle for clock ID %d\n", clk->id);
        }

        return ret;
}

static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
        int ret;
        u32 val;
        struct scmi_clk *clk = to_scmi_clk(hw);

        /* SCMI OEM Duty Cycle is expressed as a percentage */
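        /* e.g. num/den = 1/2 maps to val = 50 */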
        val = (duty->num * 100) / duty->den;
        ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
                                                 SCMI_CLOCK_CFG_DUTY_CYCLE,
                                                 val, false);
        if (ret)
                dev_warn(clk->dev,
                         "Failed to set duty cycle(%u/%u) for clock ID %d\n",
                         duty->num, duty->den, clk->id);

        return ret;
}

static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
                             const struct clk_ops *scmi_ops)
{
        int ret;
        unsigned long min_rate, max_rate;

        struct clk_init_data init = {
                .flags = CLK_GET_RATE_NOCACHE,
                .num_parents = sclk->info->num_parents,
                .ops = scmi_ops,
                .name = sclk->info->name,
                .parent_data = sclk->parent_data,
        };

        sclk->hw.init = &init;
        ret = devm_clk_hw_register(dev, &sclk->hw);
        if (ret)
                return ret;

        if (sclk->info->rate_discrete) {
                int num_rates = sclk->info->list.num_rates;

                if (num_rates <= 0)
                        return -EINVAL;

                min_rate = sclk->info->list.rates[0];
                max_rate = sclk->info->list.rates[num_rates - 1];
        } else {
                min_rate = sclk->info->range.min_rate;
                max_rate = sclk->info->range.max_rate;
        }

        clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
        return ret;
}

/**
 * scmi_clk_ops_alloc() - Alloc and configure clock operations
 * @dev: A device reference for devres
 * @feats_key: A bitmap representing the desired clk_ops capabilities
 *
 * Allocate and configure a proper set of clock operations depending on the
 * specifically required SCMI clock features.
 *
 * Return: A pointer to the allocated and configured clk_ops on success,
 *         or NULL on allocation failure.
 */
static const struct clk_ops *
scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
{
        struct clk_ops *ops;

        ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
        if (!ops)
                return NULL;
        /*
         * We can provide enable/disable/is_enabled atomic callbacks only if the
         * underlying SCMI transport for an SCMI instance is configured to
         * handle SCMI commands in an atomic manner.
         *
         * When no SCMI atomic transport support is available we instead provide
         * only the prepare/unprepare API, as allowed by the clock framework
         * when atomic calls are not available.
         */
        if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
                if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
                        ops->enable = scmi_clk_atomic_enable;
                        ops->disable = scmi_clk_atomic_disable;
                } else {
                        ops->prepare = scmi_clk_enable;
                        ops->unprepare = scmi_clk_disable;
                }
        }

        if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
                ops->is_enabled = scmi_clk_atomic_is_enabled;

        /* Rate ops */
        ops->recalc_rate = scmi_clk_recalc_rate;
        ops->round_rate = scmi_clk_round_rate;
        ops->determine_rate = scmi_clk_determine_rate;
        if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
                ops->set_rate = scmi_clk_set_rate;

        /* Parent ops */
        ops->get_parent = scmi_clk_get_parent;
        if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED))
                ops->set_parent = scmi_clk_set_parent;

        /* Duty cycle */
        if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) {
                ops->get_duty_cycle = scmi_clk_get_duty_cycle;
                ops->set_duty_cycle = scmi_clk_set_duty_cycle;
        }

        return ops;
}

/**
 * scmi_clk_ops_select() - Select a proper set of clock operations
 * @sclk: A reference to an SCMI clock descriptor
 * @atomic_capable: A flag to indicate if atomic mode is supported by the
 *                  transport
 * @atomic_threshold_us: Platform atomic threshold value in microseconds:
 *                       clk_ops are atomic when clock enable latency is less
 *                       than this threshold
 * @clk_ops_db: A reference to the array used as a database to store all the
 *              created clock operations combinations.
 * @db_size: Maximum number of entries held by @clk_ops_db
 *
 * After having built a bitmap descriptor representing the set of features
 * needed by this SCMI clock, first use it to look up the set of previously
 * allocated clk_ops and check whether a suitable combination of clock
 * operations was already created; when no match is found, allocate a brand
 * new set of clk_ops satisfying the required combination of features and
 * save it for future reference.
 *
 * In this way only one set of clk_ops is ever created for each different
 * combination that is effectively needed by a driver instance.
 *
 * Return: A pointer to the allocated and configured clk_ops on success, or
 *         NULL otherwise.
 */
static const struct clk_ops *
scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
                    unsigned int atomic_threshold_us,
                    const struct clk_ops **clk_ops_db, size_t db_size)
{
        const struct scmi_clock_info *ci = sclk->info;
        unsigned int feats_key = 0;
        const struct clk_ops *ops;

        /*
         * Note that when the transport is atomic but the SCMI protocol did
         * not specify (or does not support) an enable_latency for a clock,
         * we default to using atomic operations mode.
         */
        if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
                feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);

        if (!ci->state_ctrl_forbidden)
                feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);

        if (!ci->rate_ctrl_forbidden)
                feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);

        if (!ci->parent_ctrl_forbidden)
                feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);

        if (ci->extended_config)
                feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);

        if (WARN_ON(feats_key >= db_size))
                return NULL;

        /* Lookup previously allocated ops */
        ops = clk_ops_db[feats_key];
        if (ops)
                return ops;

        /* Did not find a pre-allocated clock_ops */
        ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
        if (!ops)
                return NULL;

        /* Store new ops combinations */
        clk_ops_db[feats_key] = ops;

        return ops;
}

static int scmi_clocks_probe(struct scmi_device *sdev)
{
        int idx, count, err;
        unsigned int atomic_threshold_us;
        bool transport_is_atomic;
        struct clk_hw **hws;
        struct clk_hw_onecell_data *clk_data;
        struct device *dev = &sdev->dev;
        struct device_node *np = dev->of_node;
        const struct scmi_handle *handle = sdev->handle;
        struct scmi_protocol_handle *ph;
        const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};

        if (!handle)
                return -ENODEV;

        scmi_proto_clk_ops =
                handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
        if (IS_ERR(scmi_proto_clk_ops))
                return PTR_ERR(scmi_proto_clk_ops);

        count = scmi_proto_clk_ops->count_get(ph);
        if (count < 0) {
                dev_err(dev, "%pOFn: invalid clock output count\n", np);
                return -EINVAL;
        }

        clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
                                GFP_KERNEL);
        if (!clk_data)
                return -ENOMEM;

        clk_data->num = count;
        hws = clk_data->hws;

        transport_is_atomic = handle->is_transport_atomic(handle,
                                                          &atomic_threshold_us);

        for (idx = 0; idx < count; idx++) {
                struct scmi_clk *sclk;
                const struct clk_ops *scmi_ops;

                sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
                if (!sclk)
                        return -ENOMEM;

                sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
                if (!sclk->info) {
                        dev_dbg(dev, "invalid clock info for idx %d\n", idx);
                        devm_kfree(dev, sclk);
                        continue;
                }

                sclk->id = idx;
                sclk->ph = ph;
                sclk->dev = dev;

                /*
                 * Note that scmi_clk_ops_db lives on the stack, not in
                 * global scope, so that the devm_-allocated clk_ops are
                 * never shared across the probe sequences of multiple
                 * SCMI clock driver instances.
                 */
                scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
                                               atomic_threshold_us,
                                               scmi_clk_ops_db,
                                               ARRAY_SIZE(scmi_clk_ops_db));
                if (!scmi_ops)
                        return -ENOMEM;

                /* Initialize clock parent data. */
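                /*
                 * Each parent_data entry records the SCMI-reported parent
                 * clock ID in .index and the matching clk_hw pointer from
                 * the hws[] array in .hw.
                 */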
                if (sclk->info->num_parents > 0) {
                        sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
                                                         sizeof(*sclk->parent_data), GFP_KERNEL);
                        if (!sclk->parent_data)
                                return -ENOMEM;

                        for (int i = 0; i < sclk->info->num_parents; i++) {
                                sclk->parent_data[i].index = sclk->info->parents[i];
                                sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
                        }
                }

                err = scmi_clk_ops_init(dev, sclk, scmi_ops);
                if (err) {
                        dev_err(dev, "failed to register clock %d\n", idx);
                        devm_kfree(dev, sclk->parent_data);
                        devm_kfree(dev, sclk);
                        hws[idx] = NULL;
                } else {
                        dev_dbg(dev, "Registered clock:%s%s\n",
                                sclk->info->name,
                                scmi_ops->enable ? " (atomic ops)" : "");
                        hws[idx] = &sclk->hw;
                }
        }

        return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
                                           clk_data);
}

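
/*
 * For reference, consumers address these clocks through the clock protocol
 * child node of the SCMI firmware node, using the SCMI clock ID as the cell
 * value. Illustrative DT snippet only; node names and phandles are examples:
 *
 *     scmi_clk: protocol@14 {
 *             reg = <0x14>;
 *             #clock-cells = <1>;
 *     };
 *
 *     some-device {
 *             clocks = <&scmi_clk 2>;
 *     };
 */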

static const struct scmi_device_id scmi_id_table[] = {
        { SCMI_PROTOCOL_CLOCK, "clocks" },
        { },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
        .name = "scmi-clocks",
        .probe = scmi_clocks_probe,
        .id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");