 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***           debugfs support           ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;

	if (!clk || !pdentry)
		return -EINVAL;

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		return -ENOMEM;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d) {
		debugfs_remove(clk->dentry);
		return -ENOMEM;
	}

	return 0;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret;

	ret = clk_debug_create_one(clk, pdentry);
	if (ret)
		return ret;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	return 0;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs.
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);
	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);
	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);

#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (__clk_is_enabled(clk)) {
		if (clk->ops->disable_unused)
			clk->ops->disable_unused(clk->hw);
		else if (clk->ops->disable)
			clk->ops->disable(clk->hw);
	}

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? 0 : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline unsigned int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? 0 : clk->enable_count;
}

inline unsigned int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk)
		return 0;

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		return ret;

	if (!clk->parent)
		ret = 0;

	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? 0 : clk->flags;
}

bool __clk_is_enabled(struct clk *clk)
{
	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing.
	 */
	if (!clk->ops->is_enabled)
		return clk->enable_count ? true : false;

	return clk->ops->is_enabled(clk->hw);
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(IS_ERR(clk)))
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

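/*
 * Example (illustrative sketch, not part of this file's API): a typical
 * consumer pairs the two phases documented above -- the sleepable
 * prepare phase and the atomic-safe enable phase.  The "uart" clock
 * name and the surrounding driver context are hypothetical.
 */
static int __maybe_unused example_clk_consumer(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "uart");	/* may sleep */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* slow part: may sleep */
	if (ret)
		goto put;

	ret = clk_enable(clk);		/* fast part: never sleeps */
	if (ret)
		goto unprepare;

	/* ... program the hardware; the clk is now prepared and enabled ... */

	clk_disable(clk);		/* teardown mirrors bring-up order */
unprepare:
	clk_unprepare(clk);
put:
	clk_put(clk);
	return ret;
}
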
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate.
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return 0;

	if (!clk->ops->round_rate) {
		if (clk->flags & CLK_SET_RATE_PARENT)
			return __clk_round_rate(clk->parent, rate);
		else
			return clk->rate;
	}

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

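/*
 * Example (illustrative sketch): query what rate the hardware would
 * actually deliver before committing to it.  The 48 MHz target is
 * hypothetical, and the clk pointer is assumed to come from clk_get().
 */
static long __maybe_unused example_check_rate(struct clk *clk)
{
	long rounded;

	rounded = clk_round_rate(clk, 48000000);
	if (rounded <= 0)
		return rounded;		/* no usable rate */

	pr_debug("closest achievable rate: %ld Hz\n", rounded);
	return rounded;
}
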
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case the rate is recalculated first.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);

	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(clk, 0);

	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw,
					new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* save parent rate, if it exists */
	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
		new_rate = clk->ops->round_rate(clk->hw, rate,
				&best_parent_rate);
		goto out;
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->parent->new_rate;

		goto out;
	}

	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
	struct hlist_node *tmp;
	struct clk *child, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->parent)
		best_parent_rate = clk->parent->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
	else
		clk->rate = best_parent_rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
		ret = -EBUSY;
		goto out;
	}

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

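/*
 * Example (illustrative sketch): round first, then set, so the caller
 * knows the rate the clock will actually run at.  Whether the request
 * propagates to the parent depends on CLK_SET_RATE_PARENT as described
 * above.  The 100 MHz target and the context are hypothetical.
 */
static int __maybe_unused example_set_rate(struct clk *clk)
{
	long rate;
	int ret;

	rate = clk_round_rate(clk, 100000000);
	if (rate <= 0)
		return -EINVAL;

	ret = clk_set_rate(clk, rate);
	if (ret)
		pr_err("failed to set rate to %ld Hz: %d\n", rate, ret);

	return ret;
}
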
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function.
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kzalloc((sizeof(struct clk *) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	if (!clk->parents)
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);

	/*
	 * find index of new parent clock using cached parent ptrs,
	 * or if not yet cached, use string name comparison and cache
	 * them now to avoid future calls to __clk_lookup.
	 */
	for (i = 0; i < clk->num_parents; i++) {
		if (clk->parents && clk->parents[i] == parent)
			break;
		else if (!strcmp(clk->parent_names[i], parent->name)) {
			if (clk->parents)
				clk->parents[i] = __clk_lookup(parent->name);
			break;
		}
	}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects */
	if (ret == NOTIFY_STOP)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

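/*
 * Example (illustrative sketch): switch a mux to a different input.
 * Both clocks are assumed to have been obtained via clk_get(), and per
 * the rules above the mux must be gated first if it carries
 * CLK_SET_PARENT_GATE.  The names are hypothetical.
 */
static int __maybe_unused example_reparent(struct clk *mux, struct clk *pll)
{
	int ret;

	ret = clk_set_parent(mux, pll);	/* rate is recalculated downstream */
	if (ret)
		pr_err("reparenting failed: %d\n", ret);

	return ret;
}
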
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents > 1 && !clk->parents) {
		clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
		if (orphan->ops->get_parent) {
			i = orphan->ops->get_parent(orphan->hw);
			if (!strcmp(clk->name, orphan->parent_names[i]))
				__clk_reparent(orphan, clk);
			continue;
		}

		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}
	}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by the .init and .clk fields shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns the struct clk on success, or an ERR_PTR() on failure.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = hw->clk;
	clk->name = hw->init->name;
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->parent_names = hw->init->parent_names;
	clk->num_parents = hw->init->num_parents;

	ret = __clk_init(dev, clk);
	if (ret)
		return ERR_PTR(ret);

	return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
	int i, ret;

	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
	if (!clk->name) {
		pr_err("%s: could not allocate clk->name\n", __func__);
		ret = -ENOMEM;
		goto fail_name;
	}
	clk->ops = hw->init->ops;
	clk->hw = hw;
	clk->flags = hw->init->flags;
	clk->num_parents = hw->init->num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < clk->num_parents; i++) {
		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
						GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return 0;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk->name);
fail_name:
	return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API.  In the event of an error clk_register will
 * return an error code; drivers must test for an error code after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
	int ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	ret = _clk_register(dev, hw, clk);
	if (!ret)
		return clk;

	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

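/*
 * Example (illustrative sketch): registering a clock from a driver.  The
 * "example_gate" and "pll1" names are hypothetical, and the ops table is
 * assumed to be supplied by the caller; a real driver typically embeds
 * the clk_hw in its own state and points hw->init at a clk_init_data.
 */
static struct clk * __maybe_unused example_register(struct device *dev,
						    struct clk_hw *hw,
						    const struct clk_ops *ops)
{
	static const char *parent_names[] = { "pll1" };
	struct clk_init_data init = {
		.name = "example_gate",
		.ops = ops,
		.parent_names = parent_names,
		.num_parents = ARRAY_SIZE(parent_names),
		.flags = 0,
	};

	hw->init = &init;	/* only read during clk_register() */

	return clk_register(dev, hw);
}
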
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
	clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
	struct clk *clk;
	int ret;

	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	ret = _clk_register(dev, hw, clk);
	if (!ret) {
		devres_add(dev, clk);
	} else {
		devres_free(clk);
		clk = ERR_PTR(ret);
	}

	return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

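/*
 * Example (illustrative sketch): the managed variant ties the clock's
 * lifetime to the device, so probe error paths and driver detach need no
 * explicit clk_unregister() call.  The probe context is hypothetical and
 * hw->init is assumed to be set up as in the previous example.
 */
static int __maybe_unused example_probe_register(struct device *dev,
						 struct clk_hw *hw)
{
	struct clk *clk;

	clk = devm_clk_register(dev, hw);
	return IS_ERR(clk) ? PTR_ERR(clk) : 0;
}
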
static int devm_clk_match(struct device *dev, void *res, void *data)
{
	struct clk *c = res;

	if (WARN_ON(!c))
		return 0;
	return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device from which the clock was registered
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would deadlock on the prepare_lock mutex.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

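/*
 * Example (illustrative sketch): a rate-change callback.  Returning
 * NOTIFY_BAD from a PRE_RATE_CHANGE event vetoes the change, as described
 * above.  The 200 MHz limit is hypothetical, and per the rules above the
 * callback must not call back into the clk API.
 */
static int __maybe_unused example_clk_notify(struct notifier_block *nb,
					     unsigned long event, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (event == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
		return NOTIFY_BAD;	/* veto rates we cannot handle */

	return NOTIFY_OK;
}

static struct notifier_block example_clk_nb __maybe_unused = {
	.notifier_call = example_clk_notify,
};
/* registered with: clk_notifier_register(clk, &example_clk_nb); */
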
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			list_del(&cn->node);
			kfree(cn);
		}
	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
	struct list_head link;

	struct device_node *node;
	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
	void *data;
};

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
		void *data)
{
	return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
	struct clk_onecell_data *clk_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= clk_data->clk_num) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback
 */
int of_clk_add_provider(struct device_node *np,
			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
						   void *data),
			void *data)
{
	struct of_clk_provider *cp;

	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->get = clk_src_get;

	mutex_lock(&of_clk_lock);
	list_add(&cp->link, &of_clk_providers);
	mutex_unlock(&of_clk_lock);
	pr_debug("Added clock from %s\n", np->full_name);

	return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

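/*
 * Example (illustrative sketch): a DT provider exposing several clocks
 * through the onecell helper above.  The clks[] contents and the device
 * node are hypothetical; consumers then reference the clocks by index in
 * their "clocks" phandle specifier.
 */
static void __init __maybe_unused example_of_provider(struct device_node *np,
						      struct clk **clks,
						      unsigned int count)
{
	static struct clk_onecell_data clk_data;

	clk_data.clks = clks;
	clk_data.clk_num = count;
	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
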
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
	struct of_clk_provider *cp;

	mutex_lock(&of_clk_lock);
	list_for_each_entry(cp, &of_clk_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
	struct of_clk_provider *provider;
	struct clk *clk = ERR_PTR(-ENOENT);

	/* Check if we have such a provider in our array */
	mutex_lock(&of_clk_lock);
	list_for_each_entry(provider, &of_clk_providers, link) {
		if (provider->node == clkspec->np)
			clk = provider->get(clkspec, provider->data);
		if (!IS_ERR(clk))
			break;
	}
	mutex_unlock(&of_clk_lock);

	return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
	struct of_phandle_args clkspec;
	const char *clk_name;
	int rc;

	if (index < 0)
		return NULL;

	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
					&clkspec);
	if (rc)
		return NULL;

	if (of_property_read_string_index(clkspec.np, "clock-output-names",
					  clkspec.args_count ? clkspec.args[0] : 0,
					  &clk_name) < 0)
		clk_name = clkspec.np->name;

	of_node_put(clkspec.np);
	return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
	struct device_node *np;

	for_each_matching_node(np, matches) {
		const struct of_device_id *match = of_match_node(matches, np);
		of_clk_init_cb_t clk_init_cb = match->data;
		clk_init_cb(np);
	}
}
#endif