/*
 * V4L2 clock service
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

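/*
 * Software clocks registered with this facility live on clk_list; look one up
 * by its device-ID string. The caller must hold clk_lock.
 */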
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list)
		if (!strcmp(dev_id, clk->dev_id))
			return clk;

	return ERR_PTR(-ENODEV);
}

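/*
 * Obtain a clock for a (sub-)device: a real Common Clock Framework clock is
 * preferred if one exists; otherwise fall back to a software clock registered
 * here, matched first by device name and then by OF node name.
 */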
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;
	struct clk *ccf_clk = clk_get(dev, id);
	char clk_name[V4L2_CLK_NAME_SIZE];

	if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
		return ERR_PTR(-EPROBE_DEFER);

	if (!IS_ERR_OR_NULL(ccf_clk)) {
		clk = kzalloc(sizeof(*clk), GFP_KERNEL);
		if (!clk) {
			clk_put(ccf_clk);
			return ERR_PTR(-ENOMEM);
		}
		clk->clk = ccf_clk;
		return clk;
	}

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev));

	/* if dev_name is not found, try to use the OF name to find again */
	if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
		v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
		clk = v4l2_clk_find(clk_name);
	}

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	if (clk->clk) {
		clk_put(clk->clk);
		kfree(clk);
		return;
	}

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

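/*
 * Pin the module providing a software clock's ops, so it cannot be unloaded
 * while the clock is in use. Fails if the clock is no longer registered.
 */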
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

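/*
 * Enabling is reference-counted for software clocks: the driver's .enable()
 * callback only runs on the first enable, and .disable() on the last disable.
 */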
int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_prepare_enable(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so, don't do that
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	if (clk->clk)
		return clk_disable_unprepare(clk->clk);

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
		 clk->dev_id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret;

	if (clk->clk)
		return clk_get_rate(clk->clk);

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret;

	if (clk->clk) {
		long r = clk_round_rate(clk->clk, rate);
		if (r < 0)
			return r;
		return clk_set_rate(clk->clk, r);
	}

	ret = v4l2_clk_lock_driver(clk);
	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

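/*
 * Register a software clock with the given ops under a unique device-ID
 * string, so that a consumer can later find it through v4l2_clk_get().
 */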
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if (!clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s clock!\n",
		 __func__, clk->dev_id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

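/* Convenience wrappers for a software clock with a fixed rate */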
struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;
	return priv->rate;
}

struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
				unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);

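/*
 * A rough usage sketch (names such as "mclk" and the 24 MHz rate are only
 * placeholders): a bridge/host driver registers a fixed-rate software clock
 * under the sensor's I2C name, and the sensor subdevice driver then gets and
 * enables it:
 *
 *	char name[V4L2_CLK_NAME_SIZE];
 *
 *	v4l2_clk_name_i2c(name, sizeof(name),
 *			  i2c_adapter_id(client->adapter), client->addr);
 *	mclk = v4l2_clk_register_fixed(name, 24000000);
 *
 * Then, in the sensor driver:
 *
 *	clk = v4l2_clk_get(&client->dev, "mclk");
 *	if (!IS_ERR(clk))
 *		v4l2_clk_enable(clk);
 */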