]>
Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
1da177e4 | 2 | /* |
f8ce2547 | 3 | * linux/include/linux/clk.h |
1da177e4 LT |
4 | * |
5 | * Copyright (C) 2004 ARM Limited. | |
6 | * Written by Deep Blue Solutions Limited. | |
b2476490 | 7 | * Copyright (C) 2011-2012 Linaro Ltd <[email protected]> |
1da177e4 | 8 | */ |
686f8c5d TP |
9 | #ifndef __LINUX_CLK_H |
10 | #define __LINUX_CLK_H | |
1da177e4 | 11 | |
9f1612d3 | 12 | #include <linux/err.h> |
40d3e0f4 | 13 | #include <linux/kernel.h> |
b2476490 | 14 | #include <linux/notifier.h> |
40d3e0f4 | 15 | |
1da177e4 | 16 | struct device; |
b2476490 | 17 | struct clk; |
71a2f115 KM |
18 | struct device_node; |
19 | struct of_phandle_args; | |
b2476490 | 20 | |
b2476490 MT |
21 | /** |
22 | * DOC: clk notifier callback types | |
23 | * | |
24 | * PRE_RATE_CHANGE - called immediately before the clk rate is changed, | |
25 | * to indicate that the rate change will proceed. Drivers must | |
26 | * immediately terminate any operations that will be affected by the | |
fb72a059 SB |
27 | * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK, |
28 | * NOTIFY_STOP or NOTIFY_BAD. | |
b2476490 MT |
29 | * |
30 | * ABORT_RATE_CHANGE: called if the rate change failed for some reason | |
31 | * after PRE_RATE_CHANGE. In this case, all registered notifiers on | |
32 | * the clk will be called with ABORT_RATE_CHANGE. Callbacks must | |
fb72a059 | 33 | * always return NOTIFY_DONE or NOTIFY_OK. |
b2476490 MT |
34 | * |
35 | * POST_RATE_CHANGE - called after the clk rate change has successfully | |
fb72a059 | 36 | * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK. |
b2476490 | 37 | * |
1da177e4 | 38 | */ |
b2476490 MT |
39 | #define PRE_RATE_CHANGE BIT(0) |
40 | #define POST_RATE_CHANGE BIT(1) | |
41 | #define ABORT_RATE_CHANGE BIT(2) | |
1da177e4 | 42 | |
b2476490 MT |
/**
 * struct clk_notifier - associate a clk with a notifier
 * @clk: struct clk * to associate the notifier with
 * @notifier_head: an srcu_notifier_head for this clk
 * @node: linked list pointers
 *
 * A list of struct clk_notifier is maintained by the notifier code.
 * An entry is created whenever code registers the first notifier on a
 * particular @clk.  Future notifiers on that @clk are added to the
 * @notifier_head.
 */
struct clk_notifier {
	struct clk			*clk;
	struct srcu_notifier_head	notifier_head;
	struct list_head		node;
};
1da177e4 | 59 | |
b2476490 MT |
/**
 * struct clk_notifier_data - rate data to pass to the notifier callback
 * @clk: struct clk * being changed
 * @old_rate: previous rate of this clk
 * @new_rate: new rate of this clk
 *
 * For a pre-notifier, @old_rate is the clk's rate before this rate
 * change, and @new_rate is what the rate will be in the future.  For a
 * post-notifier, @old_rate and @new_rate are both set to the clk's
 * current rate (this was done to optimize the implementation).
 */
struct clk_notifier_data {
	struct clk		*clk;
	unsigned long		old_rate;
	unsigned long		new_rate;
};
76 | ||
266e4e9d DA |
/**
 * struct clk_bulk_data - Data used for bulk clk operations.
 * @id: clock consumer ID
 * @clk: struct clk * to store the associated clock
 *
 * The CLK APIs provide a series of clk_bulk_*() API calls as
 * a convenience to consumers which require multiple clks.  This
 * structure is used to manage data for these calls.
 */
struct clk_bulk_data {
	const char		*id;
	struct clk		*clk;
};
91 | ||
e81b87d2 KK |
92 | #ifdef CONFIG_COMMON_CLK |
93 | ||
86bcfa2e | 94 | /** |
b90f3726 | 95 | * clk_notifier_register - register a clock rate-change notifier callback |
86bcfa2e MT |
96 | * @clk: clock whose rate we are interested in |
97 | * @nb: notifier block with callback function pointer | |
98 | * | |
99 | * ProTip: debugging across notifier chains can be frustrating. Make sure that | |
100 | * your notifier callback function prints a nice big warning in case of | |
101 | * failure. | |
102 | */ | |
b2476490 MT |
103 | int clk_notifier_register(struct clk *clk, struct notifier_block *nb); |
104 | ||
86bcfa2e | 105 | /** |
b90f3726 | 106 | * clk_notifier_unregister - unregister a clock rate-change notifier callback |
86bcfa2e MT |
107 | * @clk: clock whose rate we are no longer interested in |
108 | * @nb: notifier block which will be unregistered | |
109 | */ | |
b2476490 MT |
110 | int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); |
111 | ||
6d30d50d JB |
112 | /** |
113 | * devm_clk_notifier_register - register a managed rate-change notifier callback | |
114 | * @dev: device for clock "consumer" | |
115 | * @clk: clock whose rate we are interested in | |
116 | * @nb: notifier block with callback function pointer | |
117 | * | |
118 | * Returns 0 on success, -EERROR otherwise | |
119 | */ | |
e6fb7aee JB |
120 | int devm_clk_notifier_register(struct device *dev, struct clk *clk, |
121 | struct notifier_block *nb); | |
6d30d50d | 122 | |
5279fc40 BB |
123 | /** |
124 | * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion) | |
125 | * for a clock source. | |
126 | * @clk: clock source | |
127 | * | |
128 | * This gets the clock source accuracy expressed in ppb. | |
129 | * A perfect clock returns 0. | |
130 | */ | |
131 | long clk_get_accuracy(struct clk *clk); | |
132 | ||
e59c5371 MT |
133 | /** |
134 | * clk_set_phase - adjust the phase shift of a clock signal | |
135 | * @clk: clock signal source | |
136 | * @degrees: number of degrees the signal is shifted | |
137 | * | |
138 | * Shifts the phase of a clock signal by the specified degrees. Returns 0 on | |
139 | * success, -EERROR otherwise. | |
140 | */ | |
141 | int clk_set_phase(struct clk *clk, int degrees); | |
142 | ||
143 | /** | |
144 | * clk_get_phase - return the phase shift of a clock signal | |
145 | * @clk: clock signal source | |
146 | * | |
147 | * Returns the phase shift of a clock node in degrees, otherwise returns | |
148 | * -EERROR. | |
149 | */ | |
150 | int clk_get_phase(struct clk *clk); | |
151 | ||
9fba738a JB |
152 | /** |
153 | * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal | |
154 | * @clk: clock signal source | |
155 | * @num: numerator of the duty cycle ratio to be applied | |
156 | * @den: denominator of the duty cycle ratio to be applied | |
157 | * | |
158 | * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on | |
159 | * success, -EERROR otherwise. | |
160 | */ | |
161 | int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den); | |
162 | ||
163 | /** | |
9d1c94a6 | 164 | * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal |
9fba738a JB |
165 | * @clk: clock signal source |
166 | * @scale: scaling factor to be applied to represent the ratio as an integer | |
167 | * | |
168 | * Returns the duty cycle ratio multiplied by the scale provided, otherwise | |
169 | * returns -EERROR. | |
170 | */ | |
171 | int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); | |
172 | ||
3d3801ef MT |
173 | /** |
174 | * clk_is_match - check if two clk's point to the same hardware clock | |
175 | * @p: clk compared against q | |
176 | * @q: clk compared against p | |
177 | * | |
178 | * Returns true if the two struct clk pointers both point to the same hardware | |
0e056eb5 MCC |
179 | * clock node. Put differently, returns true if @p and @q |
180 | * share the same &struct clk_core object. | |
3d3801ef MT |
181 | * |
182 | * Returns false otherwise. Note that two NULL clks are treated as matching. | |
183 | */ | |
184 | bool clk_is_match(const struct clk *p, const struct clk *q); | |
185 | ||
2746f13f BD |
186 | /** |
187 | * clk_rate_exclusive_get - get exclusivity over the rate control of a | |
188 | * producer | |
189 | * @clk: clock source | |
190 | * | |
191 | * This function allows drivers to get exclusive control over the rate of a | |
192 | * provider. It prevents any other consumer from executing, even indirectly, | |
193 | * an operation which could alter the rate of the provider or cause glitches. | |
194 | * | |
195 | * If exclusivity is claimed more than once on a clock, even by the same driver, | |
196 | * the rate effectively gets locked as exclusivity can't be preempted. | |
197 | * | |
198 | * Must not be called from within atomic context. | |
199 | * | |
200 | * Returns success (0) or negative errno. | |
201 | */ | |
202 | int clk_rate_exclusive_get(struct clk *clk); | |
203 | ||
b0cde62e UKK |
204 | /** |
205 | * devm_clk_rate_exclusive_get - devm variant of clk_rate_exclusive_get | |
206 | * @dev: device the exclusivity is bound to | |
207 | * @clk: clock source | |
208 | * | |
209 | * Calls clk_rate_exclusive_get() on @clk and registers a devm cleanup handler | |
210 | * on @dev to call clk_rate_exclusive_put(). | |
211 | * | |
212 | * Must not be called from within atomic context. | |
213 | */ | |
214 | int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk); | |
215 | ||
2746f13f BD |
216 | /** |
217 | * clk_rate_exclusive_put - release exclusivity over the rate control of a | |
218 | * producer | |
219 | * @clk: clock source | |
220 | * | |
221 | * This function allows drivers to release the exclusivity it previously got | |
222 | * from clk_rate_exclusive_get() | |
223 | * | |
224 | * The caller must balance the number of clk_rate_exclusive_get() and | |
225 | * clk_rate_exclusive_put() calls. | |
226 | * | |
227 | * Must not be called from within atomic context. | |
228 | */ | |
229 | void clk_rate_exclusive_put(struct clk *clk); | |
230 | ||
5279fc40 BB |
231 | #else |
232 | ||
e81b87d2 KK |
/*
 * !CONFIG_COMMON_CLK stub: rate-change notifiers need the common clock
 * framework, so report the operation as unsupported.
 */
static inline int clk_notifier_register(struct clk *clk,
					struct notifier_block *nb)
{
	return -ENOTSUPP;
}
238 | ||
/* !CONFIG_COMMON_CLK stub: nothing can be registered, so report unsupported. */
static inline int clk_notifier_unregister(struct clk *clk,
					  struct notifier_block *nb)
{
	return -ENOTSUPP;
}
244 | ||
e6fb7aee JB |
/* !CONFIG_COMMON_CLK stub: managed notifier registration is unsupported. */
static inline int devm_clk_notifier_register(struct device *dev,
					     struct clk *clk,
					     struct notifier_block *nb)
{
	return -ENOTSUPP;
}
251 | ||
5279fc40 BB |
/*
 * !CONFIG_COMMON_CLK stub: the accuracy is unknown without the framework,
 * so report -ENOTSUPP rather than 0 (which would mean a perfect clock).
 */
static inline long clk_get_accuracy(struct clk *clk)
{
	return -ENOTSUPP;
}
256 | ||
e59c5371 MT |
257 | static inline long clk_set_phase(struct clk *clk, int phase) |
258 | { | |
259 | return -ENOTSUPP; | |
260 | } | |
261 | ||
262 | static inline long clk_get_phase(struct clk *clk) | |
263 | { | |
264 | return -ENOTSUPP; | |
265 | } | |
266 | ||
9fba738a JB |
/* !CONFIG_COMMON_CLK stub: duty-cycle control is unsupported. */
static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
				     unsigned int den)
{
	return -ENOTSUPP;
}
272 | ||
/*
 * !CONFIG_COMMON_CLK stub: report a 0 ratio.
 *
 * Returns int (not unsigned int) to match the declaration used when
 * CONFIG_COMMON_CLK is enabled, which is documented to return a
 * negative errno on failure; an unsigned return type would make
 * callers' "ret < 0" error checks dead code.
 */
static inline int clk_get_scaled_duty_cycle(struct clk *clk,
					    unsigned int scale)
{
	return 0;
}
278 | ||
3d3801ef MT |
/*
 * !CONFIG_COMMON_CLK stub: without struct clk_core, two clks match only
 * when they are the very same pointer (two NULL clks therefore match).
 */
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
	return p == q;
}
283 | ||
2746f13f BD |
/* !CONFIG_COMMON_CLK stub: no rate control to lock, succeed trivially. */
static inline int clk_rate_exclusive_get(struct clk *clk)
{
	return 0;
}
288 | ||
7f1dd39a UKK |
/* !CONFIG_COMMON_CLK stub: nothing to acquire, succeed trivially. */
static inline int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
{
	return 0;
}
293 | ||
2746f13f BD |
294 | static inline void clk_rate_exclusive_put(struct clk *clk) {} |
295 | ||
7e87aed9 | 296 | #endif |
1da177e4 | 297 | |
0bfa0820 | 298 | #ifdef CONFIG_HAVE_CLK_PREPARE |
93abe8e4 VK |
299 | /** |
300 | * clk_prepare - prepare a clock source | |
301 | * @clk: clock source | |
302 | * | |
303 | * This prepares the clock source for use. | |
304 | * | |
305 | * Must not be called from within atomic context. | |
306 | */ | |
93abe8e4 | 307 | int clk_prepare(struct clk *clk); |
266e4e9d DA |
308 | int __must_check clk_bulk_prepare(int num_clks, |
309 | const struct clk_bulk_data *clks); | |
0bfa0820 NP |
310 | |
311 | /** | |
312 | * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it. | |
313 | * @clk: clock source | |
314 | * | |
315 | * Returns true if clk_prepare() implicitly enables the clock, effectively | |
316 | * making clk_enable()/clk_disable() no-ops, false otherwise. | |
317 | * | |
318 | * This is of interest mainly to the power management code where actually | |
319 | * disabling the clock also requires unpreparing it to have any material | |
320 | * effect. | |
321 | * | |
322 | * Regardless of the value returned here, the caller must always invoke | |
323 | * clk_enable() or clk_prepare_enable() and counterparts for usage counts | |
324 | * to be right. | |
325 | */ | |
326 | bool clk_is_enabled_when_prepared(struct clk *clk); | |
93abe8e4 VK |
327 | #else |
/*
 * !CONFIG_HAVE_CLK_PREPARE stub: nothing to prepare, but keep the
 * might_sleep() annotation so atomic-context callers are still caught.
 */
static inline int clk_prepare(struct clk *clk)
{
	might_sleep();
	return 0;
}
266e4e9d | 333 | |
570aaec7 AS |
/*
 * !CONFIG_HAVE_CLK_PREPARE stub: nothing to prepare for any clk in the
 * set; might_sleep() preserves the may-sleep contract of the real
 * implementation.
 */
static inline int __must_check
clk_bulk_prepare(int num_clks, const struct clk_bulk_data *clks)
{
	might_sleep();
	return 0;
}
0bfa0820 NP |
340 | |
/*
 * !CONFIG_HAVE_CLK_PREPARE stub: clk_prepare() is a no-op here, so it
 * never implicitly enables the clock.
 */
static inline bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return false;
}
93abe8e4 VK |
345 | #endif |
346 | ||
347 | /** | |
348 | * clk_unprepare - undo preparation of a clock source | |
349 | * @clk: clock source | |
350 | * | |
351 | * This undoes a previously prepared clock. The caller must balance | |
352 | * the number of prepare and unprepare calls. | |
353 | * | |
354 | * Must not be called from within atomic context. | |
355 | */ | |
356 | #ifdef CONFIG_HAVE_CLK_PREPARE | |
357 | void clk_unprepare(struct clk *clk); | |
266e4e9d | 358 | void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks); |
93abe8e4 VK |
359 | #else |
/*
 * !CONFIG_HAVE_CLK_PREPARE stub: nothing to undo; might_sleep()
 * preserves the may-sleep contract of the real implementation.
 */
static inline void clk_unprepare(struct clk *clk)
{
	might_sleep();
}
570aaec7 AS |
/*
 * !CONFIG_HAVE_CLK_PREPARE stub: nothing to undo for any clk in the
 * set; might_sleep() preserves the may-sleep contract.
 */
static inline void clk_bulk_unprepare(int num_clks,
				      const struct clk_bulk_data *clks)
{
	might_sleep();
}
93abe8e4 VK |
369 | #endif |
370 | ||
371 | #ifdef CONFIG_HAVE_CLK | |
1da177e4 LT |
372 | /** |
373 | * clk_get - lookup and obtain a reference to a clock producer. | |
374 | * @dev: device for clock "consumer" | |
a58b3a4a | 375 | * @id: clock consumer ID |
1da177e4 LT |
376 | * |
377 | * Returns a struct clk corresponding to the clock producer, or | |
ea3f4eac RK |
378 | * valid IS_ERR() condition containing errno. The implementation |
379 | * uses @dev and @id to determine the clock consumer, and thereby | |
380 | * the clock producer. (IOW, @id may be identical strings, but | |
381 | * clk_get may return different clock producers depending on @dev.) | |
f47fc0ac RK |
382 | * |
383 | * Drivers must assume that the clock source is not enabled. | |
f7ad160b AR |
384 | * |
385 | * clk_get should not be called from within interrupt context. | |
1da177e4 LT |
386 | */ |
387 | struct clk *clk_get(struct device *dev, const char *id); | |
388 | ||
266e4e9d DA |
389 | /** |
390 | * clk_bulk_get - lookup and obtain a number of references to clock producer. | |
391 | * @dev: device for clock "consumer" | |
392 | * @num_clks: the number of clk_bulk_data | |
393 | * @clks: the clk_bulk_data table of consumer | |
394 | * | |
395 | * This helper function allows drivers to get several clk consumers in one | |
396 | * operation. If any of the clk cannot be acquired then any clks | |
397 | * that were obtained will be freed before returning to the caller. | |
398 | * | |
399 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained | |
400 | * successfully, or valid IS_ERR() condition containing errno. | |
401 | * The implementation uses @dev and @clk_bulk_data.id to determine the | |
402 | * clock consumer, and thereby the clock producer. | |
403 | * The clock returned is stored in each @clk_bulk_data.clk field. | |
404 | * | |
405 | * Drivers must assume that the clock source is not enabled. | |
406 | * | |
407 | * clk_bulk_get should not be called from within interrupt context. | |
408 | */ | |
409 | int __must_check clk_bulk_get(struct device *dev, int num_clks, | |
410 | struct clk_bulk_data *clks); | |
616e45df DA |
411 | /** |
412 | * clk_bulk_get_all - lookup and obtain all available references to clock | |
413 | * producer. | |
414 | * @dev: device for clock "consumer" | |
415 | * @clks: pointer to the clk_bulk_data table of consumer | |
416 | * | |
417 | * This helper function allows drivers to get all clk consumers in one | |
418 | * operation. If any of the clk cannot be acquired then any clks | |
419 | * that were obtained will be freed before returning to the caller. | |
420 | * | |
421 | * Returns a positive value for the number of clocks obtained while the | |
422 | * clock references are stored in the clk_bulk_data table in @clks field. | |
423 | * Returns 0 if there're none and a negative value if something failed. | |
424 | * | |
425 | * Drivers must assume that the clock source is not enabled. | |
426 | * | |
427 | * clk_bulk_get should not be called from within interrupt context. | |
428 | */ | |
429 | int __must_check clk_bulk_get_all(struct device *dev, | |
430 | struct clk_bulk_data **clks); | |
2f25528e SN |
431 | |
432 | /** | |
433 | * clk_bulk_get_optional - lookup and obtain a number of references to clock producer | |
434 | * @dev: device for clock "consumer" | |
435 | * @num_clks: the number of clk_bulk_data | |
436 | * @clks: the clk_bulk_data table of consumer | |
437 | * | |
438 | * Behaves the same as clk_bulk_get() except where there is no clock producer. | |
439 | * In this case, instead of returning -ENOENT, the function returns 0 and | |
440 | * NULL for a clk for which a clock producer could not be determined. | |
441 | */ | |
442 | int __must_check clk_bulk_get_optional(struct device *dev, int num_clks, | |
443 | struct clk_bulk_data *clks); | |
618aee02 DA |
444 | /** |
445 | * devm_clk_bulk_get - managed get multiple clk consumers | |
446 | * @dev: device for clock "consumer" | |
447 | * @num_clks: the number of clk_bulk_data | |
448 | * @clks: the clk_bulk_data table of consumer | |
449 | * | |
450 | * Return 0 on success, an errno on failure. | |
451 | * | |
452 | * This helper function allows drivers to get several clk | |
453 | * consumers in one operation with management, the clks will | |
454 | * automatically be freed when the device is unbound. | |
455 | */ | |
456 | int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, | |
457 | struct clk_bulk_data *clks); | |
9bd5ef0b SN |
458 | /** |
459 | * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks | |
460 | * @dev: device for clock "consumer" | |
6ee82ef0 | 461 | * @num_clks: the number of clk_bulk_data |
9bd5ef0b SN |
462 | * @clks: pointer to the clk_bulk_data table of consumer |
463 | * | |
464 | * Behaves the same as devm_clk_bulk_get() except where there is no clock | |
465 | * producer. In this case, instead of returning -ENOENT, the function returns | |
466 | * NULL for given clk. It is assumed all clocks in clk_bulk_data are optional. | |
467 | * | |
468 | * Returns 0 if all clocks specified in clk_bulk_data table are obtained | |
469 | * successfully or for any clk there was no clk provider available, otherwise | |
470 | * returns valid IS_ERR() condition containing errno. | |
471 | * The implementation uses @dev and @clk_bulk_data.id to determine the | |
472 | * clock consumer, and thereby the clock producer. | |
473 | * The clock returned is stored in each @clk_bulk_data.clk field. | |
474 | * | |
475 | * Drivers must assume that the clock source is not enabled. | |
476 | * | |
477 | * clk_bulk_get should not be called from within interrupt context. | |
478 | */ | |
479 | int __must_check devm_clk_bulk_get_optional(struct device *dev, int num_clks, | |
480 | struct clk_bulk_data *clks); | |
f08c2e28 DA |
481 | /** |
482 | * devm_clk_bulk_get_all - managed get multiple clk consumers | |
483 | * @dev: device for clock "consumer" | |
484 | * @clks: pointer to the clk_bulk_data table of consumer | |
485 | * | |
486 | * Returns a positive value for the number of clocks obtained while the | |
487 | * clock references are stored in the clk_bulk_data table in @clks field. | |
488 | * Returns 0 if there're none and a negative value if something failed. | |
489 | * | |
490 | * This helper function allows drivers to get several clk | |
491 | * consumers in one operation with management, the clks will | |
492 | * automatically be freed when the device is unbound. | |
493 | */ | |
494 | ||
495 | int __must_check devm_clk_bulk_get_all(struct device *dev, | |
496 | struct clk_bulk_data **clks); | |
618aee02 | 497 | |
265b07df | 498 | /** |
51e32e89 | 499 | * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed) |
265b07df ST |
500 | * @dev: device for clock "consumer" |
501 | * @clks: pointer to the clk_bulk_data table of consumer | |
502 | * | |
51e32e89 CC |
503 | * Returns a positive value for the number of clocks obtained while the |
504 | * clock references are stored in the clk_bulk_data table in @clks field. | |
505 | * Returns 0 if there're none and a negative value if something failed. | |
265b07df ST |
506 | * |
507 | * This helper function allows drivers to get all clocks of the | |
508 | * consumer and enables them in one operation with management. | |
509 | * The clks will automatically be disabled and freed when the device | |
510 | * is unbound. | |
511 | */ | |
512 | ||
51e32e89 CC |
513 | int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, |
514 | struct clk_bulk_data **clks); | |
265b07df | 515 | |
a8a97db9 MB |
516 | /** |
517 | * devm_clk_get - lookup and obtain a managed reference to a clock producer. | |
518 | * @dev: device for clock "consumer" | |
a58b3a4a | 519 | * @id: clock consumer ID |
a8a97db9 | 520 | * |
af89cd45 UKK |
521 | * Context: May sleep. |
522 | * | |
523 | * Return: a struct clk corresponding to the clock producer, or | |
a8a97db9 MB |
524 | * valid IS_ERR() condition containing errno. The implementation |
525 | * uses @dev and @id to determine the clock consumer, and thereby | |
526 | * the clock producer. (IOW, @id may be identical strings, but | |
527 | * clk_get may return different clock producers depending on @dev.) | |
528 | * | |
af89cd45 UKK |
529 | * Drivers must assume that the clock source is neither prepared nor |
530 | * enabled. | |
a8a97db9 MB |
531 | * |
532 | * The clock will automatically be freed when the device is unbound | |
533 | * from the bus. | |
534 | */ | |
535 | struct clk *devm_clk_get(struct device *dev, const char *id); | |
536 | ||
7ef9651e UKK |
537 | /** |
538 | * devm_clk_get_prepared - devm_clk_get() + clk_prepare() | |
539 | * @dev: device for clock "consumer" | |
540 | * @id: clock consumer ID | |
541 | * | |
542 | * Context: May sleep. | |
543 | * | |
544 | * Return: a struct clk corresponding to the clock producer, or | |
545 | * valid IS_ERR() condition containing errno. The implementation | |
546 | * uses @dev and @id to determine the clock consumer, and thereby | |
547 | * the clock producer. (IOW, @id may be identical strings, but | |
548 | * clk_get may return different clock producers depending on @dev.) | |
549 | * | |
550 | * The returned clk (if valid) is prepared. Drivers must however assume | |
551 | * that the clock is not enabled. | |
552 | * | |
553 | * The clock will automatically be unprepared and freed when the device | |
554 | * is unbound from the bus. | |
555 | */ | |
556 | struct clk *devm_clk_get_prepared(struct device *dev, const char *id); | |
557 | ||
558 | /** | |
559 | * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() | |
560 | * @dev: device for clock "consumer" | |
561 | * @id: clock consumer ID | |
562 | * | |
563 | * Context: May sleep. | |
564 | * | |
565 | * Return: a struct clk corresponding to the clock producer, or | |
566 | * valid IS_ERR() condition containing errno. The implementation | |
567 | * uses @dev and @id to determine the clock consumer, and thereby | |
568 | * the clock producer. (IOW, @id may be identical strings, but | |
569 | * clk_get may return different clock producers depending on @dev.) | |
570 | * | |
571 | * The returned clk (if valid) is prepared and enabled. | |
572 | * | |
573 | * The clock will automatically be disabled, unprepared and freed | |
574 | * when the device is unbound from the bus. | |
575 | */ | |
576 | struct clk *devm_clk_get_enabled(struct device *dev, const char *id); | |
577 | ||
60b8f0dd PE |
578 | /** |
579 | * devm_clk_get_optional - lookup and obtain a managed reference to an optional | |
580 | * clock producer. | |
581 | * @dev: device for clock "consumer" | |
582 | * @id: clock consumer ID | |
583 | * | |
af89cd45 UKK |
584 | * Context: May sleep. |
585 | * | |
586 | * Return: a struct clk corresponding to the clock producer, or | |
587 | * valid IS_ERR() condition containing errno. The implementation | |
588 | * uses @dev and @id to determine the clock consumer, and thereby | |
589 | * the clock producer. If no such clk is found, it returns NULL | |
590 | * which serves as a dummy clk. That's the only difference compared | |
591 | * to devm_clk_get(). | |
592 | * | |
593 | * Drivers must assume that the clock source is neither prepared nor | |
594 | * enabled. | |
595 | * | |
596 | * The clock will automatically be freed when the device is unbound | |
597 | * from the bus. | |
60b8f0dd PE |
598 | */ |
599 | struct clk *devm_clk_get_optional(struct device *dev, const char *id); | |
600 | ||
7ef9651e UKK |
601 | /** |
602 | * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() | |
603 | * @dev: device for clock "consumer" | |
604 | * @id: clock consumer ID | |
605 | * | |
606 | * Context: May sleep. | |
607 | * | |
608 | * Return: a struct clk corresponding to the clock producer, or | |
609 | * valid IS_ERR() condition containing errno. The implementation | |
610 | * uses @dev and @id to determine the clock consumer, and thereby | |
611 | * the clock producer. If no such clk is found, it returns NULL | |
612 | * which serves as a dummy clk. That's the only difference compared | |
613 | * to devm_clk_get_prepared(). | |
614 | * | |
615 | * The returned clk (if valid) is prepared. Drivers must however | |
616 | * assume that the clock is not enabled. | |
617 | * | |
618 | * The clock will automatically be unprepared and freed when the | |
619 | * device is unbound from the bus. | |
620 | */ | |
621 | struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); | |
622 | ||
623 | /** | |
624 | * devm_clk_get_optional_enabled - devm_clk_get_optional() + | |
625 | * clk_prepare_enable() | |
626 | * @dev: device for clock "consumer" | |
627 | * @id: clock consumer ID | |
628 | * | |
629 | * Context: May sleep. | |
630 | * | |
631 | * Return: a struct clk corresponding to the clock producer, or | |
632 | * valid IS_ERR() condition containing errno. The implementation | |
633 | * uses @dev and @id to determine the clock consumer, and thereby | |
634 | * the clock producer. If no such clk is found, it returns NULL | |
635 | * which serves as a dummy clk. That's the only difference compared | |
636 | * to devm_clk_get_enabled(). | |
637 | * | |
638 | * The returned clk (if valid) is prepared and enabled. | |
639 | * | |
640 | * The clock will automatically be disabled, unprepared and freed | |
641 | * when the device is unbound from the bus. | |
642 | */ | |
643 | struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); | |
644 | ||
9934a1bd BG |
645 | /** |
646 | * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() + | |
647 | * clk_set_rate() + | |
648 | * clk_prepare_enable() | |
649 | * @dev: device for clock "consumer" | |
650 | * @id: clock consumer ID | |
651 | * @rate: new clock rate | |
652 | * | |
653 | * Context: May sleep. | |
654 | * | |
655 | * Return: a struct clk corresponding to the clock producer, or | |
656 | * valid IS_ERR() condition containing errno. The implementation | |
657 | * uses @dev and @id to determine the clock consumer, and thereby | |
658 | * the clock producer. If no such clk is found, it returns NULL | |
659 | * which serves as a dummy clk. That's the only difference compared | |
660 | * to devm_clk_get_enabled(). | |
661 | * | |
662 | * The returned clk (if valid) is prepared and enabled and rate was set. | |
663 | * | |
664 | * The clock will automatically be disabled, unprepared and freed | |
665 | * when the device is unbound from the bus. | |
666 | */ | |
667 | struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev, | |
668 | const char *id, | |
669 | unsigned long rate); | |
670 | ||
71a2f115 KM |
671 | /** |
672 | * devm_get_clk_from_child - lookup and obtain a managed reference to a | |
673 | * clock producer from child node. | |
674 | * @dev: device for clock "consumer" | |
675 | * @np: pointer to clock consumer node | |
676 | * @con_id: clock consumer ID | |
677 | * | |
678 | * This function parses the clocks, and uses them to look up the | |
679 | * struct clk from the registered list of clock providers by using | |
680 | * @np and @con_id | |
681 | * | |
682 | * The clock will automatically be freed when the device is unbound | |
683 | * from the bus. | |
684 | */ | |
685 | struct clk *devm_get_clk_from_child(struct device *dev, | |
686 | struct device_node *np, const char *con_id); | |
687 | ||
1da177e4 LT |
688 | /** |
689 | * clk_enable - inform the system when the clock source should be running. | |
690 | * @clk: clock source | |
691 | * | |
692 | * If the clock can not be enabled/disabled, this should return success. | |
693 | * | |
40d3e0f4 RK |
694 | * May be called from atomic contexts. |
695 | * | |
1da177e4 LT |
696 | * Returns success (0) or negative errno. |
697 | */ | |
698 | int clk_enable(struct clk *clk); | |
699 | ||
266e4e9d DA |
700 | /** |
701 | * clk_bulk_enable - inform the system when the set of clks should be running. | |
702 | * @num_clks: the number of clk_bulk_data | |
703 | * @clks: the clk_bulk_data table of consumer | |
704 | * | |
705 | * May be called from atomic contexts. | |
706 | * | |
707 | * Returns success (0) or negative errno. | |
708 | */ | |
709 | int __must_check clk_bulk_enable(int num_clks, | |
710 | const struct clk_bulk_data *clks); | |
711 | ||
1da177e4 LT |
712 | /** |
713 | * clk_disable - inform the system when the clock source is no longer required. | |
714 | * @clk: clock source | |
f47fc0ac RK |
715 | * |
716 | * Inform the system that a clock source is no longer required by | |
717 | * a driver and may be shut down. | |
718 | * | |
40d3e0f4 RK |
719 | * May be called from atomic contexts. |
720 | * | |
f47fc0ac RK |
721 | * Implementation detail: if the clock source is shared between |
722 | * multiple drivers, clk_enable() calls must be balanced by the | |
723 | * same number of clk_disable() calls for the clock source to be | |
724 | * disabled. | |
1da177e4 LT |
725 | */ |
726 | void clk_disable(struct clk *clk); | |
727 | ||
266e4e9d DA |
728 | /** |
729 | * clk_bulk_disable - inform the system when the set of clks is no | |
730 | * longer required. | |
731 | * @num_clks: the number of clk_bulk_data | |
732 | * @clks: the clk_bulk_data table of consumer | |
733 | * | |
734 | * Inform the system that a set of clks is no longer required by | |
735 | * a driver and may be shut down. | |
736 | * | |
737 | * May be called from atomic contexts. | |
738 | * | |
739 | * Implementation detail: if the set of clks is shared between | |
740 | * multiple drivers, clk_bulk_enable() calls must be balanced by the | |
741 | * same number of clk_bulk_disable() calls for the clock source to be | |
742 | * disabled. | |
743 | */ | |
744 | void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks); | |
745 | ||
1da177e4 LT |
746 | /** |
747 | * clk_get_rate - obtain the current clock rate (in Hz) for a clock source. | |
748 | * This is only valid once the clock source has been enabled. | |
749 | * @clk: clock source | |
750 | */ | |
751 | unsigned long clk_get_rate(struct clk *clk); | |
752 | ||
753 | /** | |
754 | * clk_put - "free" the clock source | |
755 | * @clk: clock source | |
f47fc0ac RK |
756 | * |
757 | * Note: drivers must ensure that all clk_enable calls made on this | |
758 | * clock source are balanced by clk_disable calls prior to calling | |
759 | * this function. | |
f7ad160b AR |
760 | * |
761 | * clk_put should not be called from within interrupt context. | |
1da177e4 LT |
762 | */ |
763 | void clk_put(struct clk *clk); | |
764 | ||
266e4e9d DA |
765 | /** |
766 | * clk_bulk_put - "free" the clock source | |
767 | * @num_clks: the number of clk_bulk_data | |
768 | * @clks: the clk_bulk_data table of consumer | |
769 | * | |
770 | * Note: drivers must ensure that all clk_bulk_enable calls made on this | |
771 | * clock source are balanced by clk_bulk_disable calls prior to calling | |
772 | * this function. | |
773 | * | |
774 | * clk_bulk_put should not be called from within interrupt context. | |
775 | */ | |
776 | void clk_bulk_put(int num_clks, struct clk_bulk_data *clks); | |
777 | ||
616e45df DA |
778 | /** |
 779 | * clk_bulk_put_all - "free" all the clock sources |
780 | * @num_clks: the number of clk_bulk_data | |
781 | * @clks: the clk_bulk_data table of consumer | |
782 | * | |
783 | * Note: drivers must ensure that all clk_bulk_enable calls made on this | |
784 | * clock source are balanced by clk_bulk_disable calls prior to calling | |
785 | * this function. | |
786 | * | |
787 | * clk_bulk_put_all should not be called from within interrupt context. | |
788 | */ | |
789 | void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks); | |
790 | ||
a8a97db9 MB |
791 | /** |
792 | * devm_clk_put - "free" a managed clock source | |
da3dae54 | 793 | * @dev: device used to acquire the clock |
a8a97db9 MB |
794 | * @clk: clock source acquired with devm_clk_get() |
795 | * | |
796 | * Note: drivers must ensure that all clk_enable calls made on this | |
797 | * clock source are balanced by clk_disable calls prior to calling | |
798 | * this function. | |
799 | * | |
 800 | * devm_clk_put should not be called from within interrupt context. | |
801 | */ | |
802 | void devm_clk_put(struct device *dev, struct clk *clk); | |
1da177e4 LT |
803 | |
804 | /* | |
805 | * The remaining APIs are optional for machine class support. | |
806 | */ | |
807 | ||
808 | ||
809 | /** | |
810 | * clk_round_rate - adjust a rate to the exact rate a clock can provide | |
811 | * @clk: clock source | |
812 | * @rate: desired clock rate in Hz | |
813 | * | |
d2d14a77 RK |
814 | * This answers the question "if I were to pass @rate to clk_set_rate(), |
815 | * what clock rate would I end up with?" without changing the hardware | |
816 | * in any way. In other words: | |
817 | * | |
818 | * rate = clk_round_rate(clk, r); | |
819 | * | |
820 | * and: | |
821 | * | |
822 | * clk_set_rate(clk, r); | |
823 | * rate = clk_get_rate(clk); | |
824 | * | |
825 | * are equivalent except the former does not modify the clock hardware | |
826 | * in any way. | |
827 | * | |
1da177e4 LT |
828 | * Returns rounded clock rate in Hz, or negative errno. |
829 | */ | |
830 | long clk_round_rate(struct clk *clk, unsigned long rate); | |
8b7730dd | 831 | |
1da177e4 LT |
832 | /** |
833 | * clk_set_rate - set the clock rate for a clock source | |
834 | * @clk: clock source | |
835 | * @rate: desired clock rate in Hz | |
836 | * | |
64c76b31 MB |
837 | * Updating the rate starts at the top-most affected clock and then |
838 | * walks the tree down to the bottom-most clock that needs updating. | |
839 | * | |
1da177e4 LT |
840 | * Returns success (0) or negative errno. |
841 | */ | |
842 | int clk_set_rate(struct clk *clk, unsigned long rate); | |
8b7730dd | 843 | |
55e9b8b7 JB |
844 | /** |
 845 | * clk_set_rate_exclusive - set the clock rate and claim exclusivity over | |
846 | * clock source | |
847 | * @clk: clock source | |
848 | * @rate: desired clock rate in Hz | |
849 | * | |
850 | * This helper function allows drivers to atomically set the rate of a producer | |
851 | * and claim exclusivity over the rate control of the producer. | |
852 | * | |
853 | * It is essentially a combination of clk_set_rate() and | |
 854 | * clk_rate_exclusive_get(). Caller must balance this call with a call to | |
855 | * clk_rate_exclusive_put() | |
856 | * | |
857 | * Returns success (0) or negative errno. | |
858 | */ | |
859 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); | |
860 | ||
4e88f3de TR |
861 | /** |
862 | * clk_has_parent - check if a clock is a possible parent for another | |
863 | * @clk: clock source | |
864 | * @parent: parent clock source | |
865 | * | |
866 | * This function can be used in drivers that need to check that a clock can be | |
867 | * the parent of another without actually changing the parent. | |
868 | * | |
869 | * Returns true if @parent is a possible parent for @clk, false otherwise. | |
870 | */ | |
22fb0e28 | 871 | bool clk_has_parent(const struct clk *clk, const struct clk *parent); |
4e88f3de | 872 | |
1c8e6004 TV |
873 | /** |
874 | * clk_set_rate_range - set a rate range for a clock source | |
875 | * @clk: clock source | |
876 | * @min: desired minimum clock rate in Hz, inclusive | |
877 | * @max: desired maximum clock rate in Hz, inclusive | |
878 | * | |
879 | * Returns success (0) or negative errno. | |
880 | */ | |
881 | int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); | |
882 | ||
883 | /** | |
884 | * clk_set_min_rate - set a minimum clock rate for a clock source | |
885 | * @clk: clock source | |
886 | * @rate: desired minimum clock rate in Hz, inclusive | |
887 | * | |
888 | * Returns success (0) or negative errno. | |
889 | */ | |
890 | int clk_set_min_rate(struct clk *clk, unsigned long rate); | |
891 | ||
892 | /** | |
893 | * clk_set_max_rate - set a maximum clock rate for a clock source | |
894 | * @clk: clock source | |
895 | * @rate: desired maximum clock rate in Hz, inclusive | |
896 | * | |
897 | * Returns success (0) or negative errno. | |
898 | */ | |
899 | int clk_set_max_rate(struct clk *clk, unsigned long rate); | |
900 | ||
1da177e4 LT |
901 | /** |
902 | * clk_set_parent - set the parent clock source for this clock | |
903 | * @clk: clock source | |
904 | * @parent: parent clock source | |
905 | * | |
906 | * Returns success (0) or negative errno. | |
907 | */ | |
908 | int clk_set_parent(struct clk *clk, struct clk *parent); | |
909 | ||
910 | /** | |
911 | * clk_get_parent - get the parent clock source for this clock | |
912 | * @clk: clock source | |
913 | * | |
914 | * Returns struct clk corresponding to parent clock source, or | |
915 | * valid IS_ERR() condition containing errno. | |
916 | */ | |
917 | struct clk *clk_get_parent(struct clk *clk); | |
918 | ||
05fd8e73 SH |
919 | /** |
920 | * clk_get_sys - get a clock based upon the device name | |
921 | * @dev_id: device name | |
922 | * @con_id: connection ID | |
923 | * | |
924 | * Returns a struct clk corresponding to the clock producer, or | |
925 | * valid IS_ERR() condition containing errno. The implementation | |
926 | * uses @dev_id and @con_id to determine the clock consumer, and | |
927 | * thereby the clock producer. In contrast to clk_get() this function | |
928 | * takes the device name instead of the device itself for identification. | |
929 | * | |
930 | * Drivers must assume that the clock source is not enabled. | |
931 | * | |
932 | * clk_get_sys should not be called from within interrupt context. | |
933 | */ | |
934 | struct clk *clk_get_sys(const char *dev_id, const char *con_id); | |
935 | ||
8b95d1ce RD |
936 | /** |
937 | * clk_save_context - save clock context for poweroff | |
938 | * | |
939 | * Saves the context of the clock register for powerstates in which the | |
940 | * contents of the registers will be lost. Occurs deep within the suspend | |
941 | * code so locking is not necessary. | |
942 | */ | |
943 | int clk_save_context(void); | |
944 | ||
945 | /** | |
946 | * clk_restore_context - restore clock context after poweroff | |
947 | * | |
948 | * This occurs with all clocks enabled. Occurs deep within the resume code | |
949 | * so locking is not necessary. | |
950 | */ | |
951 | void clk_restore_context(void); | |
952 | ||
93abe8e4 VK |
953 | #else /* !CONFIG_HAVE_CLK */ |
954 | ||
955 | static inline struct clk *clk_get(struct device *dev, const char *id) | |
956 | { | |
957 | return NULL; | |
958 | } | |
959 | ||
6e0d4ff4 DA |
960 | static inline int __must_check clk_bulk_get(struct device *dev, int num_clks, |
961 | struct clk_bulk_data *clks) | |
266e4e9d DA |
962 | { |
963 | return 0; | |
964 | } | |
965 | ||
2f25528e SN |
966 | static inline int __must_check clk_bulk_get_optional(struct device *dev, |
967 | int num_clks, struct clk_bulk_data *clks) | |
968 | { | |
969 | return 0; | |
970 | } | |
971 | ||
616e45df DA |
972 | static inline int __must_check clk_bulk_get_all(struct device *dev, |
973 | struct clk_bulk_data **clks) | |
974 | { | |
975 | return 0; | |
976 | } | |
977 | ||
93abe8e4 VK |
978 | static inline struct clk *devm_clk_get(struct device *dev, const char *id) |
979 | { | |
980 | return NULL; | |
981 | } | |
982 | ||
7ef9651e UKK |
983 | static inline struct clk *devm_clk_get_prepared(struct device *dev, |
984 | const char *id) | |
985 | { | |
986 | return NULL; | |
987 | } | |
988 | ||
989 | static inline struct clk *devm_clk_get_enabled(struct device *dev, | |
990 | const char *id) | |
991 | { | |
992 | return NULL; | |
993 | } | |
994 | ||
60b8f0dd PE |
995 | static inline struct clk *devm_clk_get_optional(struct device *dev, |
996 | const char *id) | |
997 | { | |
998 | return NULL; | |
999 | } | |
1000 | ||
7ef9651e UKK |
1001 | static inline struct clk *devm_clk_get_optional_prepared(struct device *dev, |
1002 | const char *id) | |
1003 | { | |
1004 | return NULL; | |
1005 | } | |
1006 | ||
1007 | static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, | |
1008 | const char *id) | |
1009 | { | |
1010 | return NULL; | |
1011 | } | |
1012 | ||
9934a1bd BG |
1013 | static inline struct clk * |
1014 | devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id, | |
1015 | unsigned long rate) | |
1016 | { | |
1017 | return NULL; | |
1018 | } | |
1019 | ||
6e0d4ff4 DA |
1020 | static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, |
1021 | struct clk_bulk_data *clks) | |
618aee02 DA |
1022 | { |
1023 | return 0; | |
1024 | } | |
1025 | ||
9bd5ef0b SN |
1026 | static inline int __must_check devm_clk_bulk_get_optional(struct device *dev, |
1027 | int num_clks, struct clk_bulk_data *clks) | |
1028 | { | |
1029 | return 0; | |
1030 | } | |
1031 | ||
f08c2e28 DA |
/* !CONFIG_HAVE_CLK stub: reports zero clocks acquired. */
static inline int __must_check devm_clk_bulk_get_all(struct device *dev,
						     struct clk_bulk_data **clks)
{
	return 0;
}
1038 | ||
51e32e89 | 1039 | static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, |
265b07df ST |
1040 | struct clk_bulk_data **clks) |
1041 | { | |
1042 | return 0; | |
1043 | } | |
1044 | ||
71a2f115 KM |
1045 | static inline struct clk *devm_get_clk_from_child(struct device *dev, |
1046 | struct device_node *np, const char *con_id) | |
1047 | { | |
1048 | return NULL; | |
1049 | } | |
1050 | ||
93abe8e4 VK |
1051 | static inline void clk_put(struct clk *clk) {} |
1052 | ||
266e4e9d DA |
1053 | static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} |
1054 | ||
616e45df DA |
1055 | static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} |
1056 | ||
93abe8e4 VK |
1057 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
1058 | ||
1059 | static inline int clk_enable(struct clk *clk) | |
1060 | { | |
1061 | return 0; | |
1062 | } | |
1063 | ||
570aaec7 AS |
1064 | static inline int __must_check clk_bulk_enable(int num_clks, |
1065 | const struct clk_bulk_data *clks) | |
266e4e9d DA |
1066 | { |
1067 | return 0; | |
1068 | } | |
1069 | ||
93abe8e4 VK |
1070 | static inline void clk_disable(struct clk *clk) {} |
1071 | ||
266e4e9d DA |
1072 | |
1073 | static inline void clk_bulk_disable(int num_clks, | |
570aaec7 | 1074 | const struct clk_bulk_data *clks) {} |
266e4e9d | 1075 | |
93abe8e4 VK |
1076 | static inline unsigned long clk_get_rate(struct clk *clk) |
1077 | { | |
1078 | return 0; | |
1079 | } | |
1080 | ||
1081 | static inline int clk_set_rate(struct clk *clk, unsigned long rate) | |
1082 | { | |
1083 | return 0; | |
1084 | } | |
1085 | ||
55e9b8b7 JB |
1086 | static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) |
1087 | { | |
1088 | return 0; | |
1089 | } | |
1090 | ||
93abe8e4 VK |
1091 | static inline long clk_round_rate(struct clk *clk, unsigned long rate) |
1092 | { | |
1093 | return 0; | |
1094 | } | |
1095 | ||
4e88f3de TR |
1096 | static inline bool clk_has_parent(struct clk *clk, struct clk *parent) |
1097 | { | |
1098 | return true; | |
1099 | } | |
1100 | ||
b88c9f41 DO |
1101 | static inline int clk_set_rate_range(struct clk *clk, unsigned long min, |
1102 | unsigned long max) | |
1103 | { | |
1104 | return 0; | |
1105 | } | |
1106 | ||
1107 | static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) | |
1108 | { | |
1109 | return 0; | |
1110 | } | |
1111 | ||
1112 | static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) | |
1113 | { | |
1114 | return 0; | |
1115 | } | |
1116 | ||
93abe8e4 VK |
1117 | static inline int clk_set_parent(struct clk *clk, struct clk *parent) |
1118 | { | |
1119 | return 0; | |
1120 | } | |
1121 | ||
1122 | static inline struct clk *clk_get_parent(struct clk *clk) | |
1123 | { | |
1124 | return NULL; | |
1125 | } | |
1126 | ||
b81ea968 DL |
1127 | static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id) |
1128 | { | |
1129 | return NULL; | |
1130 | } | |
8b95d1ce RD |
1131 | |
1132 | static inline int clk_save_context(void) | |
1133 | { | |
1134 | return 0; | |
1135 | } | |
1136 | ||
1137 | static inline void clk_restore_context(void) {} | |
1138 | ||
93abe8e4 VK |
1139 | #endif |
1140 | ||
/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int err = clk_prepare(clk);

	if (!err) {
		err = clk_enable(clk);
		/* Roll back the prepare if enabling failed. */
		if (err)
			clk_unprepare(clk);
	}

	return err;
}
1155 | ||
/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
static inline void clk_disable_unprepare(struct clk *clk)
{
	/* Order matters: the clock must be disabled before it is unprepared. */
	clk_disable(clk);
	clk_unprepare(clk);
}
1162 | ||
570aaec7 AS |
1163 | static inline int __must_check |
1164 | clk_bulk_prepare_enable(int num_clks, const struct clk_bulk_data *clks) | |
3c48d86c BA |
1165 | { |
1166 | int ret; | |
1167 | ||
1168 | ret = clk_bulk_prepare(num_clks, clks); | |
1169 | if (ret) | |
1170 | return ret; | |
1171 | ret = clk_bulk_enable(num_clks, clks); | |
1172 | if (ret) | |
1173 | clk_bulk_unprepare(num_clks, clks); | |
1174 | ||
1175 | return ret; | |
1176 | } | |
1177 | ||
/* Bulk counterpart of clk_disable_unprepare(); non-atomic context only. */
static inline void clk_bulk_disable_unprepare(int num_clks,
					      const struct clk_bulk_data *clks)
{
	/* Disable first, then unprepare — mirrors clk_disable_unprepare(). */
	clk_bulk_disable(num_clks, clks);
	clk_bulk_unprepare(num_clks, clks);
}
1184 | ||
/**
 * clk_drop_range - Reset any range set on that clock
 * @clk: clock source
 *
 * Drops any previously set min/max rate constraint by restoring the
 * full [0, ULONG_MAX] range via clk_set_rate_range().
 *
 * Returns success (0) or negative errno.
 */
static inline int clk_drop_range(struct clk *clk)
{
	return clk_set_rate_range(clk, 0, ULONG_MAX);
}
1195 | ||
60b8f0dd PE |
1196 | /** |
1197 | * clk_get_optional - lookup and obtain a reference to an optional clock | |
1198 | * producer. | |
1199 | * @dev: device for clock "consumer" | |
1200 | * @id: clock consumer ID | |
1201 | * | |
1202 | * Behaves the same as clk_get() except where there is no clock producer. In | |
1203 | * this case, instead of returning -ENOENT, the function returns NULL. | |
1204 | */ | |
1205 | static inline struct clk *clk_get_optional(struct device *dev, const char *id) | |
1206 | { | |
1207 | struct clk *clk = clk_get(dev, id); | |
1208 | ||
1209 | if (clk == ERR_PTR(-ENOENT)) | |
1210 | return NULL; | |
1211 | ||
1212 | return clk; | |
1213 | } | |
1214 | ||
137f8a72 | 1215 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) |
766e6a4e GL |
1216 | struct clk *of_clk_get(struct device_node *np, int index); |
1217 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); | |
1218 | struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec); | |
1219 | #else | |
1220 | static inline struct clk *of_clk_get(struct device_node *np, int index) | |
1221 | { | |
9f1612d3 | 1222 | return ERR_PTR(-ENOENT); |
766e6a4e GL |
1223 | } |
1224 | static inline struct clk *of_clk_get_by_name(struct device_node *np, | |
1225 | const char *name) | |
1226 | { | |
9f1612d3 | 1227 | return ERR_PTR(-ENOENT); |
766e6a4e | 1228 | } |
428c9de5 GU |
1229 | static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) |
1230 | { | |
1231 | return ERR_PTR(-ENOENT); | |
1232 | } | |
766e6a4e GL |
1233 | #endif |
1234 | ||
1da177e4 | 1235 | #endif |