// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) Protocol based clock driver
 *
 * Copyright (C) 2018-2024 ARM Ltd.
 */

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/scmi_protocol.h>
#include <asm/div64.h>

#define NOT_ATOMIC	false
#define ATOMIC		true

enum scmi_clk_feats {
	SCMI_CLK_ATOMIC_SUPPORTED,
	SCMI_CLK_STATE_CTRL_SUPPORTED,
	SCMI_CLK_RATE_CTRL_SUPPORTED,
	SCMI_CLK_PARENT_CTRL_SUPPORTED,
	SCMI_CLK_DUTY_CYCLE_SUPPORTED,
	SCMI_CLK_FEATS_COUNT
};

#define SCMI_MAX_CLK_OPS	BIT(SCMI_CLK_FEATS_COUNT)

static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;

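/*
 * Each optional capability above contributes one bit to a per-clock feats_key
 * bitmap, so with SCMI_CLK_FEATS_COUNT == 5 there are at most
 * SCMI_MAX_CLK_OPS == BIT(5) == 32 distinct clk_ops combinations. For
 * example, a clock that only allows state and rate control over a non-atomic
 * transport would use:
 *
 *	feats_key = BIT(SCMI_CLK_STATE_CTRL_SUPPORTED) |
 *		    BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);
 *
 * and all clocks sharing that feature set reuse the same clk_ops entry (see
 * scmi_clk_ops_select() below).
 */
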
struct scmi_clk {
	u32 id;
	struct device *dev;
	struct clk_hw hw;
	const struct scmi_clock_info *info;
	const struct scmi_protocol_handle *ph;
	struct clk_parent_data *parent_data;
};

#define to_scmi_clk(clk) container_of(clk, struct scmi_clk, hw)

static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	int ret;
	u64 rate;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = scmi_proto_clk_ops->rate_get(clk->ph, clk->id, &rate);
	if (ret)
		return 0;
	return rate;
}

static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	u64 fmin, fmax, ftmp;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/*
	 * We can't figure out what rate it will be, so just return the
	 * rate back to the caller. scmi_clk_recalc_rate() will be called
	 * after the rate is set and we'll know what rate the clock is
	 * running at then.
	 */
	if (clk->info->rate_discrete)
		return rate;

	fmin = clk->info->range.min_rate;
	fmax = clk->info->range.max_rate;
	if (rate <= fmin)
		return fmin;
	else if (rate >= fmax)
		return fmax;

	ftmp = rate - fmin;
	ftmp += clk->info->range.step_size - 1; /* to round up */
	do_div(ftmp, clk->info->range.step_size);

	return ftmp * clk->info->range.step_size + fmin;
}

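/*
 * Worked example of the rounding above, with hypothetical range values of
 * min_rate = 100000000 Hz and step_size = 25000000 Hz: a request for
 * 110000000 Hz gives ftmp = 10000000 + (25000000 - 1), which do_div()
 * truncates to one step, so the returned rate is
 * 1 * 25000000 + 100000000 = 125000000 Hz, i.e. the request is rounded up
 * to the next achievable step.
 */
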
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->rate_set(clk->ph, clk->id, rate);
}

static int scmi_clk_set_parent(struct clk_hw *hw, u8 parent_index)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->parent_set(clk->ph, clk->id, parent_index);
}

static u8 scmi_clk_get_parent(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);
	u32 parent_id, p_idx;
	int ret;

	ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id);
	if (ret)
		return 0;

	for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) {
		if (clk->parent_data[p_idx].index == parent_id)
			break;
	}

	if (p_idx == clk->info->num_parents)
		return 0;

	return p_idx;
}

static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	/*
	 * Assume all requested rates are supported and let the firmware
	 * take care of the rest.
	 */
	return 0;
}

static int scmi_clk_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, NOT_ATOMIC);
}

static void scmi_clk_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, NOT_ATOMIC);
}

static int scmi_clk_atomic_enable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	return scmi_proto_clk_ops->enable(clk->ph, clk->id, ATOMIC);
}

static void scmi_clk_atomic_disable(struct clk_hw *hw)
{
	struct scmi_clk *clk = to_scmi_clk(hw);

	scmi_proto_clk_ops->disable(clk->ph, clk->id, ATOMIC);
}

static int __scmi_clk_is_enabled(struct clk_hw *hw, bool atomic)
{
	int ret;
	bool enabled = false;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic);
	if (ret)
		dev_warn(clk->dev,
			 "Failed to get state for clock ID %d\n", clk->id);

	return !!enabled;
}

static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
{
	return __scmi_clk_is_enabled(hw, ATOMIC);
}

static int scmi_clk_is_enabled(struct clk_hw *hw)
{
	return __scmi_clk_is_enabled(hw, NOT_ATOMIC);
}

static int scmi_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	int ret;
	u32 val;
	struct scmi_clk *clk = to_scmi_clk(hw);

	ret = scmi_proto_clk_ops->config_oem_get(clk->ph, clk->id,
						 SCMI_CLOCK_CFG_DUTY_CYCLE,
						 &val, NULL, false);
	if (!ret) {
		duty->num = val;
		duty->den = 100;
	} else {
		dev_warn(clk->dev,
			 "Failed to get duty cycle for clock ID %d\n", clk->id);
	}

	return ret;
}

static int scmi_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	int ret;
	u32 val;
	struct scmi_clk *clk = to_scmi_clk(hw);

	/* SCMI OEM Duty Cycle is expressed as a percentage */
	val = (duty->num * 100) / duty->den;
	ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
						 SCMI_CLOCK_CFG_DUTY_CYCLE,
						 val, false);
	if (ret)
		dev_warn(clk->dev,
			 "Failed to set duty cycle(%u/%u) for clock ID %d\n",
			 duty->num, duty->den, clk->id);

	return ret;
}

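/*
 * Example: a consumer calling clk_set_duty_cycle(clk, 1, 4) reaches the
 * callback above with duty->num = 1 and duty->den = 4, which is forwarded to
 * the platform firmware as val = (1 * 100) / 4 = 25, i.e. a 25% duty cycle;
 * the reverse conversion is done in scmi_clk_get_duty_cycle().
 */
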
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
			     const struct clk_ops *scmi_ops)
{
	int ret;
	unsigned long min_rate, max_rate;

	struct clk_init_data init = {
		.flags = CLK_GET_RATE_NOCACHE,
		.num_parents = sclk->info->num_parents,
		.ops = scmi_ops,
		.name = sclk->info->name,
		.parent_data = sclk->parent_data,
	};

	sclk->hw.init = &init;
	ret = devm_clk_hw_register(dev, &sclk->hw);
	if (ret)
		return ret;

	if (sclk->info->rate_discrete) {
		int num_rates = sclk->info->list.num_rates;

		if (num_rates <= 0)
			return -EINVAL;

		min_rate = sclk->info->list.rates[0];
		max_rate = sclk->info->list.rates[num_rates - 1];
	} else {
		min_rate = sclk->info->range.min_rate;
		max_rate = sclk->info->range.max_rate;
	}

	clk_hw_set_rate_range(&sclk->hw, min_rate, max_rate);
	return ret;
}

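/*
 * Example of the rate-range setup above: a discrete clock reporting the
 * (ascending) rate list { 100000000, 200000000, 400000000 } Hz is clamped to
 * the range [100000000, 400000000] Hz, while a ranged clock simply uses
 * range.min_rate and range.max_rate as reported by the firmware.
 */
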
/**
 * scmi_clk_ops_alloc() - Alloc and configure clock operations
 * @dev: A device reference for devres
 * @feats_key: A bitmap representing the desired clk_ops capabilities
 *
 * Allocate and configure a proper set of clock operations depending on the
 * specifically required SCMI clock features.
 *
 * Return: A pointer to the allocated and configured clk_ops on success,
 *	   or NULL on allocation failure.
 */
static const struct clk_ops *
scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
{
	struct clk_ops *ops;

	ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return NULL;
	/*
	 * We can provide enable/disable/is_enabled atomic callbacks only if
	 * the underlying SCMI transport for an SCMI instance is configured to
	 * handle SCMI commands in an atomic manner.
	 *
	 * When no SCMI atomic transport support is available we instead
	 * provide only the prepare/unprepare API, as allowed by the clock
	 * framework when atomic calls are not available.
	 */
	if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) {
		if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
			ops->enable = scmi_clk_atomic_enable;
			ops->disable = scmi_clk_atomic_disable;
		} else {
			ops->prepare = scmi_clk_enable;
			ops->unprepare = scmi_clk_disable;
		}
	}

	if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED))
		ops->is_enabled = scmi_clk_atomic_is_enabled;
	else
		ops->is_prepared = scmi_clk_is_enabled;

	/* Rate ops */
	ops->recalc_rate = scmi_clk_recalc_rate;
	ops->round_rate = scmi_clk_round_rate;
	ops->determine_rate = scmi_clk_determine_rate;
	if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
		ops->set_rate = scmi_clk_set_rate;

	/* Parent ops */
	ops->get_parent = scmi_clk_get_parent;
	if (feats_key & BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED))
		ops->set_parent = scmi_clk_set_parent;

	/* Duty cycle */
	if (feats_key & BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED)) {
		ops->get_duty_cycle = scmi_clk_get_duty_cycle;
		ops->set_duty_cycle = scmi_clk_set_duty_cycle;
	}

	return ops;
}

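/*
 * As an example of the mapping above: a feats_key of only
 * BIT(SCMI_CLK_STATE_CTRL_SUPPORTED) yields sleeping prepare/unprepare
 * callbacks (plus is_prepared), while additionally setting
 * BIT(SCMI_CLK_ATOMIC_SUPPORTED) switches the same clock to enable/disable
 * and is_enabled, which the clock framework may invoke from atomic context.
 */
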
/**
 * scmi_clk_ops_select() - Select a proper set of clock operations
 * @sclk: A reference to an SCMI clock descriptor
 * @atomic_capable: A flag to indicate if atomic mode is supported by the
 *		    transport
 * @atomic_threshold_us: Platform atomic threshold value in microseconds:
 *			 clk_ops are atomic when clock enable latency is less
 *			 than this threshold
 * @clk_ops_db: A reference to the array used as a database to store all the
 *		created clock operations combinations.
 * @db_size: Maximum number of entries held by @clk_ops_db
 *
 * After having built a bitmap descriptor to represent the set of features
 * needed by this SCMI clock, use it first to look up, in the set of
 * previously allocated clk_ops, whether a suitable combination of clock
 * operations was already created; when no match is found, allocate a brand
 * new set of clk_ops satisfying the required combination of features and
 * save it for future references.
 *
 * In this way only one set of clk_ops is ever created for each different
 * combination that is effectively needed by a driver instance.
 *
 * Return: A pointer to the allocated and configured clk_ops on success, or
 *	   NULL otherwise.
 */
static const struct clk_ops *
scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
		    unsigned int atomic_threshold_us,
		    const struct clk_ops **clk_ops_db, size_t db_size)
{
	const struct scmi_clock_info *ci = sclk->info;
	unsigned int feats_key = 0;
	const struct clk_ops *ops;

	/*
	 * Note that when the transport is atomic but the SCMI protocol did
	 * not specify (or support) an enable_latency associated with a
	 * clock, we default to using atomic operations mode.
	 */
	if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
		feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);

	if (!ci->state_ctrl_forbidden)
		feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);

	if (!ci->rate_ctrl_forbidden)
		feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);

	if (!ci->parent_ctrl_forbidden)
		feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);

	if (ci->extended_config)
		feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);

	if (WARN_ON(feats_key >= db_size))
		return NULL;

	/* Lookup previously allocated ops */
	ops = clk_ops_db[feats_key];
	if (ops)
		return ops;

	/* Did not find a pre-allocated clock_ops */
	ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
	if (!ops)
		return NULL;

	/* Store new ops combinations */
	clk_ops_db[feats_key] = ops;

	return ops;
}

static int scmi_clocks_probe(struct scmi_device *sdev)
{
	int idx, count, err;
	unsigned int atomic_threshold_us;
	bool transport_is_atomic;
	struct clk_hw **hws;
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;
	const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};

	if (!handle)
		return -ENODEV;

	scmi_proto_clk_ops =
		handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(scmi_proto_clk_ops))
		return PTR_ERR(scmi_proto_clk_ops);

	count = scmi_proto_clk_ops->count_get(ph);
	if (count < 0) {
		dev_err(dev, "%pOFn: invalid clock output count\n", np);
		return -EINVAL;
	}

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = count;
	hws = clk_data->hws;

	transport_is_atomic = handle->is_transport_atomic(handle,
							  &atomic_threshold_us);

	for (idx = 0; idx < count; idx++) {
		struct scmi_clk *sclk;
		const struct clk_ops *scmi_ops;

		sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL);
		if (!sclk)
			return -ENOMEM;

		sclk->info = scmi_proto_clk_ops->info_get(ph, idx);
		if (!sclk->info) {
			dev_dbg(dev, "invalid clock info for idx %d\n", idx);
			devm_kfree(dev, sclk);
			continue;
		}

		sclk->id = idx;
		sclk->ph = ph;
		sclk->dev = dev;

		/*
		 * Note that scmi_clk_ops_db is kept on the stack, not made
		 * global, so that the devm_ allocated clk_ops are not shared
		 * across multiple probe sequences, i.e. across multiple SCMI
		 * clk driver instances.
		 */
		scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
					       atomic_threshold_us,
					       scmi_clk_ops_db,
					       ARRAY_SIZE(scmi_clk_ops_db));
		if (!scmi_ops)
			return -ENOMEM;

		/* Initialize clock parent data. */
		if (sclk->info->num_parents > 0) {
			sclk->parent_data = devm_kcalloc(dev, sclk->info->num_parents,
							 sizeof(*sclk->parent_data), GFP_KERNEL);
			if (!sclk->parent_data)
				return -ENOMEM;

			for (int i = 0; i < sclk->info->num_parents; i++) {
				sclk->parent_data[i].index = sclk->info->parents[i];
				sclk->parent_data[i].hw = hws[sclk->info->parents[i]];
			}
		}

		err = scmi_clk_ops_init(dev, sclk, scmi_ops);
		if (err) {
			dev_err(dev, "failed to register clock %d\n", idx);
			devm_kfree(dev, sclk->parent_data);
			devm_kfree(dev, sclk);
			hws[idx] = NULL;
		} else {
			dev_dbg(dev, "Registered clock:%s%s\n",
				sclk->info->name,
				scmi_ops->enable ? " (atomic ops)" : "");
			hws[idx] = &sclk->hw;
		}
	}

	return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
					   clk_data);
}

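/*
 * Since the provider is registered through of_clk_hw_onecell_get(), a DT
 * consumer references an SCMI clock with a single cell holding the SCMI
 * clock ID and then drives it through the regular consumer API. A minimal
 * sketch of such a consumer probe (the "core" clock name and foo_probe() are
 * hypothetical) could be:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_get(&pdev->dev, "core");
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return clk_prepare_enable(clk);
 *	}
 *
 * The prepare step then lands in scmi_clk_enable() above, or the enable step
 * in scmi_clk_atomic_enable() when the transport supports atomic operation.
 */
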
static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_CLOCK, "clocks" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_clocks_driver = {
	.name = "scmi-clocks",
	.probe = scmi_clocks_probe,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_clocks_driver);

MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI clock driver");
MODULE_LICENSE("GPL v2");