// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

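/* Illustrative sketch (not part of the original file): callers bundle the
 * event-specific data in a DSA_NOTIFIER_* info structure before notifying
 * the tree; e.g. a cross-chip MTU change could be emitted as:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.mtu = new_mtu,
 *	};
 *
 *	err = dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 */
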
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
	unsigned int id;

	if (dsa_lag_id(dst, lag) >= 0)
		/* Already mapped */
		return;

	for (id = 0; id < dst->lags_len; id++) {
		if (!dsa_lag_dev(dst, id)) {
			dst->lags[id] = lag;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}

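/* Illustrative sketch (not part of the original file): a driver that does
 * need hardware LAG IDs would check the mapping from its port_lag_join()
 * hook, with "foo" standing in for a real driver:
 *
 *	static int foo_port_lag_join(struct dsa_switch *ds, int port,
 *				     struct net_device *lag,
 *				     struct netdev_lag_upper_info *info)
 *	{
 *		if (dsa_lag_id(ds->dst, lag) < 0)
 *			return -EOPNOTSUPP;
 *		...
 *	}
 *
 * DSA then falls back to a software LAG on the bonding/team device.
 */
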
/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
	struct dsa_port *dp;
	unsigned int id;

	dsa_lag_foreach_port(dp, dst, lag)
		/* There are remaining users of this mapping */
		return;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag) {
			dst->lags[id] = NULL;
			break;
		}
	}
}

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static bool dsa_port_is_dsa(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_DSA;
}

static bool dsa_port_is_cpu(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_CPU;
}

static bool dsa_port_is_user(struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER;
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

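/* Illustrative device tree sketch (not part of the original file): the
 * routing table is derived from "link" phandles between the DSA ports of
 * cascaded switches, e.g.:
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 */
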
static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	/* Assign the default CPU port to all ports of the fabric */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;

	return 0;
}

static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		dp->mac = of_get_mac_address(dp->dn);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err)
		return err;

	dp->setup = true;

	return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (!dp->setup)
		return;

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

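/* Illustrative sketch (not part of the original file): each shim above
 * simply dispatches to the optional dsa_switch_ops hook of the same name.
 * A driver opts in by implementing that hook, with "foo" standing in for
 * a real driver:
 *
 *	static int foo_devlink_info_get(struct dsa_switch *ds,
 *					struct devlink_info_req *req,
 *					struct netlink_ext_ack *extack)
 *	{
 *		return devlink_info_driver_name_put(req, "foo");
 *	}
 */
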
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink, ds->dev);
	if (err)
		goto free_devlink;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	devlink_params_publish(ds->devlink);

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto teardown;
	}

	ds->setup = true;

	return 0;

teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}

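/* Illustrative sketch (not part of the original file): the slave MDIO bus
 * above is only instantiated for drivers that expose PHY access through
 * dsa_switch_ops, with "foo" standing in for a real driver and
 * foo_mdio_read() for whatever register accessor it already has:
 *
 *	static int foo_phy_read(struct dsa_switch *ds, int port, int regnum)
 *	{
 *		return foo_mdio_read(ds->priv, port, regnum);
 *	}
 */
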
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_unregister(ds->slave_mii_bus);

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		list_for_each_entry(dp, &ds->dst->ports, list)
			if (dp->ds == ds)
				dsa_port_devlink_teardown(dp);
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err)
			continue;
	}

	return 0;

teardown:
	list_for_each_entry(dp, &dst->ports, list)
		dsa_port_teardown(dp);

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);

	return err;
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_port_teardown(dp);

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			err = dsa_master_setup(dp->master, dp);
			if (err)
				return err;
		}
	}

	return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			dsa_master_teardown(dp->master);
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_default_cpu(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_default_cpu;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_default_cpu:
	dsa_tree_teardown_default_cpu(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_default_cpu(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_is_user_port(dp->ds, dp->index))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}

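/* Illustrative usage sketch (not part of the original file): user space
 * reaches this function through the per-master "tagging" sysfs attribute,
 * with the master and all user ports administratively down; e.g. assuming
 * a master named eth0 and a tagger named ocelot:
 *
 *	# ip link set eth0 down
 *	# echo ocelot > /sys/class/net/eth0/dsa/tagging
 */
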
static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol tag_protocol;

	tag_protocol = dsa_get_tag_protocol(dp, master);
	if (dst->tag_ops) {
		if (dst->tag_ops->proto != tag_protocol) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree, so
		 * nothing to do here.
		 */
	} else {
		dst->tag_ops = dsa_tag_driver_get(tag_protocol);
		if (IS_ERR(dst->tag_ops)) {
			if (PTR_ERR(dst->tag_ops) == -ENOPROTOOPT)
				return -EPROBE_DEFER;
			dev_warn(ds->dev, "No tagger for this switch\n");
			return PTR_ERR(dst->tag_ops);
		}
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;

		master = of_find_net_device_by_node(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		return dsa_port_parse_cpu(dp, master);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

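/* Illustrative device tree sketch (not part of the original file): a port
 * with an "ethernet" phandle is parsed as a CPU port, one with a "link"
 * property as a DSA port, and anything else as a user port:
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";
 *	};
 *
 *	port@6 {
 *		reg = <6>;
 *		ethernet = <&gmac0>;
 *	};
 */
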
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err)
			goto out_put_node;

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
				port, reg, ds->num_ports);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err)
			goto out_put_node;
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	return 0;
}

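/* Illustrative device tree sketch (not part of the original file): for a
 * fabric of cascaded switches, "dsa,member" encodes the <tree switch>
 * indices, e.g. the second switch of tree 0:
 *
 *	dsa,member = <0 1>;
 */
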
static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp, *next;

	list_for_each_entry_safe(dp, next, &dst->ports, list) {
		if (dp->ds != ds)
			continue;

		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

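/* Illustrative sketch (not part of the original file): a typical switch
 * driver calls dsa_register_switch() at the end of its probe routine,
 * with "foo" standing in for a real driver:
 *
 *	static int foo_probe(struct mdio_device *mdiodev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &mdiodev->dev;
 *		ds->num_ports = 8;
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 */
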
static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);