// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

#include "v4l2-subdev-priv.h"
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}
static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
				      struct v4l2_subdev *subdev,
				      struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}
static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}
static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
				       struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->destroy)
		return;

	n->ops->destroy(asd);
}
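/*
 * Illustrative sketch (not part of this file): the wrappers above simply
 * guard optional notifier callbacks, so any callback left NULL behaves as
 * a no-op. A notifier user would typically wire them up like this; the
 * my_*() names are hypothetical:
 *
 *	static const struct v4l2_async_notifier_operations my_notifier_ops = {
 *		.bound = my_notifier_bound,
 *		.unbind = my_notifier_unbind,
 *		.complete = my_notifier_complete,
 *	};
 */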
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
	       asd->match.i2c.adapter_id == client->adapter->nr &&
	       asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
		 struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
		 struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd_fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd_fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		      : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
		return true;

	/* Also check the secondary fwnode. */
	if (IS_ERR_OR_NULL(sd->fwnode->secondary))
		return false;

	return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
}
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}
/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}
/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}
/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}
/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_nf_can_complete(subdev_notifier))
			return false;
	}

	return true;
}
/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_nf_can_complete(notifier))
		return 0;

	return v4l2_async_nf_call_complete(notifier);
}
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
					     struct v4l2_subdev *sd)
{
	struct media_link *link = NULL;

#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)

	if (sd->entity.function != MEDIA_ENT_F_LENS &&
	    sd->entity.function != MEDIA_ENT_F_FLASH)
		return 0;

	link = media_create_ancillary_link(&n->sd->entity, &sd->entity);

#endif

	return IS_ERR(link) ? PTR_ERR(link) : 0;
}
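/*
 * Illustrative sketch: an ancillary link is only created when the bound
 * sub-device's entity function is MEDIA_ENT_F_LENS or MEDIA_ENT_F_FLASH.
 * A lens controller driver would therefore set, before registering its
 * subdev (my_lens is a hypothetical driver structure):
 *
 *	my_lens->sd.entity.function = MEDIA_ENT_F_LENS;
 *
 * so that v4l2_async_match_notify() below links it to the sub-device that
 * registered the notifier (typically the sensor) when it is bound.
 */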
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_nf_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/*
	 * Depending on the function of the entities involved, we may want to
	 * create links between them (for example between a sensor and its lens
	 * or between a sensor's source pad and the connected device's sink
	 * pad).
	 */
	ret = v4l2_async_create_ancillary_links(notifier, sd);
	if (ret) {
		v4l2_async_nf_call_unbind(notifier, sd, asd);
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * same function.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}
/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_nf_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}
/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
				 bool readd)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
		if (readd)
			list_add_tail(&sd->asd->list, &notifier->waiting);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}
/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
				 struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}
/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd, int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_nf_has_async_subdev(notifier, asd))
			return true;

	return false;
}
static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd,
				   int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}
void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_nf_init);
static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_nf_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_nf_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_nf_unbind_all_subdevs(notifier, false);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}
int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
			   struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_nf_register);
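/*
 * Illustrative sketch of the bridge-driver side, assuming hypothetical
 * bridge/my_notifier_ops names and an already-parsed local endpoint fwnode
 * "ep". The type-safe v4l2_async_nf_add_fwnode_remote() helper macro from
 * media/v4l2-async.h wraps __v4l2_async_nf_add_fwnode_remote() defined
 * later in this file:
 *
 *	struct v4l2_async_subdev *asd;
 *
 *	v4l2_async_nf_init(&bridge->notifier);
 *
 *	asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier, ep,
 *					      struct v4l2_async_subdev);
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *
 *	bridge->notifier.ops = &my_notifier_ops;
 *
 *	ret = v4l2_async_nf_register(&bridge->v4l2_dev, &bridge->notifier);
 *	if (ret)
 *		v4l2_async_nf_cleanup(&bridge->notifier);
 */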
int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
				  struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_nf_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_nf_unbind_all_subdevs(notifier, false);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_nf_unregister);
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		v4l2_async_nf_call_destroy(notifier, asd);
		kfree(asd);
	}
}

void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_nf_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
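/*
 * Illustrative sketch: teardown mirrors setup. A notifier must be
 * unregistered before it is cleaned up, typically from the bridge
 * driver's remove path (bridge is a hypothetical driver structure):
 *
 *	v4l2_async_nf_unregister(&bridge->notifier);
 *	v4l2_async_nf_cleanup(&bridge->notifier);
 *
 * Unregistering unbinds the bound sub-devices; cleaning up releases the
 * async sub-device descriptors (and their fwnode references) that were
 * added to the notifier.
 */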
int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
			       struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
			   struct fwnode_handle *fwnode,
			   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
struct v4l2_async_subdev *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
				  struct fwnode_handle *endpoint,
				  unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	struct fwnode_handle *remote;

	remote = fwnode_graph_get_remote_endpoint(endpoint);
	if (!remote)
		return ERR_PTR(-ENOTCONN);

	asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
	/*
	 * Calling __v4l2_async_nf_add_fwnode() grabs a refcount,
	 * so drop the one we got in fwnode_graph_get_remote_endpoint().
	 */
	fwnode_handle_put(remote);
	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
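/*
 * Note: drivers normally call the type-safe wrapper macros from
 * media/v4l2-async.h rather than these __-prefixed helpers. A sketch,
 * assuming a hypothetical driver struct that embeds v4l2_async_subdev as
 * its first member:
 *
 *	struct my_asd {
 *		struct v4l2_async_subdev base;
 *		u32 private_data;
 *	};
 *
 *	asd = v4l2_async_nf_add_fwnode_remote(&notifier, ep, struct my_asd);
 *
 * The macro passes sizeof(struct my_asd) as asd_struct_size, so the
 * kzalloc() in __v4l2_async_nf_add_fwnode() leaves room for the driver's
 * private fields.
 */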
struct v4l2_async_subdev *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
			unsigned short address, unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = __v4l2_async_nf_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
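/*
 * Illustrative sketch: matching on an I2C bus position instead of a
 * fwnode, using the v4l2_async_nf_add_i2c() wrapper macro from
 * media/v4l2-async.h. The adapter number and address are hypothetical:
 *
 *	asd = v4l2_async_nf_add_i2c(&notifier, 2, 0x36,
 *				    struct v4l2_async_subdev);
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *
 * The descriptor will match a sub-device whose i2c_client sits on adapter
 * 2 at address 0x36, as checked by match_i2c() above.
 */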
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_nf_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_nf_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);

	if (sd->asd)
		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
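/*
 * Illustrative sketch of the sub-device (e.g. sensor) driver side, with
 * hypothetical sensor/my_sensor_subdev_ops names. The subdev is initialized
 * and then handed to the async framework, which matches it against any
 * waiting notifiers:
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &my_sensor_subdev_ops);
 *
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		return ret;
 *
 * If sd->fwnode is left NULL, v4l2_async_register_subdev() above falls
 * back to dev_fwnode(sd->dev); per the notices in match_fwnode_one(),
 * matching on endpoint fwnodes is preferred where the driver supports it.
 */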
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	v4l2_subdev_put_privacy_led(sd);

	mutex_lock(&list_lock);

	__v4l2_async_nf_unregister(sd->subdev_notifier);
	__v4l2_async_nf_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}
static const char *
v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}
static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);
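/*
 * Example of the resulting debugfs output (device names are hypothetical),
 * as read from <debugfs>/v4l2-async/pending_async_subdevices:
 *
 *	my-bridge-driver:
 *	 [fwnode] dev=1-0036, node=/soc/i2c@0/camera@36/port/endpoint
 *	other-v4l2-dev:
 *	 [i2c] dev=2-0010
 *
 * Each registered notifier is listed together with the async sub-device
 * descriptors it is still waiting for.
 */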
static struct dentry *v4l2_async_debugfs_dir;

static int __init v4l2_async_init(void)
{
	v4l2_async_debugfs_dir = debugfs_create_dir("v4l2-async", NULL);
	debugfs_create_file("pending_async_subdevices", 0444,
			    v4l2_async_debugfs_dir, NULL,
			    &pending_subdevs_fops);

	return 0;
}

static void __exit v4l2_async_exit(void)
{
	debugfs_remove_recursive(v4l2_async_debugfs_dir);
}

subsys_initcall(v4l2_async_init);
module_exit(v4l2_async_exit);

MODULE_LICENSE("GPL");