1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4 * Copyright (c) 2008-2009 Marvell Semiconductor
9 #include <linux/device.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/rtnetlink.h>
16 #include <linux/of_net.h>
17 #include <net/devlink.h>
18 #include <net/sch_generic.h>
22 static DEFINE_MUTEX(dsa2_mutex);
23 LIST_HEAD(dsa_tree_list);
25 /* Track the bridges with forwarding offload enabled */
26 static unsigned long dsa_fwd_offloading_bridges;
29 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
30 * @dst: collection of struct dsa_switch devices to notify.
31 * @e: event, must be of type DSA_NOTIFIER_*
32 * @v: event-specific value.
34 * Given a struct dsa_switch_tree, this can be used to run a function once for
35 * each member DSA switch. The only other way to traverse the tree is through
36 * its ports list, which does not uniquely list the switches.
38 int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
40 struct raw_notifier_head *nh = &dst->nh;
43 err = raw_notifier_call_chain(nh, e, v);
45 return notifier_to_errno(err);
49 * dsa_broadcast - Notify all DSA trees in the system.
50 * @e: event, must be of type DSA_NOTIFIER_*
51 * @v: event-specific value.
53 * Can be used to notify the switching fabric of events such as cross-chip
54 * bridging between disjoint trees (such as islands of tagger-compatible
55 * switches bridged by an incompatible middle switch).
57 * WARNING: this function is not reliable during probe time, because probing
58 * between trees is asynchronous and not all DSA trees may have probed yet.
60 int dsa_broadcast(unsigned long e, void *v)
62 struct dsa_switch_tree *dst;
65 list_for_each_entry(dst, &dsa_tree_list, list) {
66 err = dsa_tree_notify(dst, e, v);
75 * dsa_lag_map() - Map LAG structure to a linear LAG array
76 * @dst: Tree in which to record the mapping.
77 * @lag: LAG structure that is to be mapped to the tree's array.
79 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
80 * two spaces. The size of the mapping space is determined by the
81 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
82 * it unset if it is not needed, in which case these functions become no-ops.
85 void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
89 for (id = 1; id <= dst->lags_len; id++) {
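/* LAG IDs handed to drivers are 1-based; the backing array is 0-based,
 * hence the "id - 1" indexing below.
 */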
90 if (!dsa_lag_by_id(dst, id)) {
91 dst->lags[id - 1] = lag;
97 /* No IDs left, which is OK. Some drivers do not need it. The
98 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
99 * returns an error for this device when joining the LAG. The
100 * driver can then return -EOPNOTSUPP back to DSA, which will
101 * fall back to a software LAG.
106 * dsa_lag_unmap() - Remove a LAG ID mapping
107 * @dst: Tree in which the mapping is recorded.
108 * @lag: LAG structure that was mapped.
110 * As there may be multiple users of the mapping, it is only removed
111 * if there are no other references to it.
113 void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
117 dsa_lags_foreach_id(id, dst) {
118 if (dsa_lag_by_id(dst, id) == lag) {
119 dst->lags[id - 1] = NULL;
126 struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
127 const struct net_device *lag_dev)
131 list_for_each_entry(dp, &dst->ports, list)
132 if (dsa_port_lag_dev_get(dp) == lag_dev)
138 struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
139 const struct net_device *br)
143 list_for_each_entry(dp, &dst->ports, list)
144 if (dsa_port_bridge_dev_get(dp) == br)
150 static int dsa_bridge_num_find(const struct net_device *bridge_dev)
152 struct dsa_switch_tree *dst;
154 list_for_each_entry(dst, &dsa_tree_list, list) {
155 struct dsa_bridge *bridge;
157 bridge = dsa_tree_bridge_find(dst, bridge_dev);
165 unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
167 unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
169 /* Switches without FDB isolation support don't get unique bridge numbering
176 /* First port that requests FDB isolation or TX forwarding
177 * offload for this bridge
179 bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
180 DSA_MAX_NUM_OFFLOADING_BRIDGES,
182 if (bridge_num >= max)
185 set_bit(bridge_num, &dsa_fwd_offloading_bridges);
191 void dsa_bridge_num_put(const struct net_device *bridge_dev,
192 unsigned int bridge_num)
194 /* Since we refcount bridges, we know that when we call this function
195 * it is no longer in use, so we can just go ahead and remove it from the bit mask.
198 clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
201 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
203 struct dsa_switch_tree *dst;
206 list_for_each_entry(dst, &dsa_tree_list, list) {
207 if (dst->index != tree_index)
210 list_for_each_entry(dp, &dst->ports, list) {
211 if (dp->ds->index != sw_index)
220 EXPORT_SYMBOL_GPL(dsa_switch_find);
222 static struct dsa_switch_tree *dsa_tree_find(int index)
224 struct dsa_switch_tree *dst;
226 list_for_each_entry(dst, &dsa_tree_list, list)
227 if (dst->index == index)
233 static struct dsa_switch_tree *dsa_tree_alloc(int index)
235 struct dsa_switch_tree *dst;
237 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
243 INIT_LIST_HEAD(&dst->rtable);
245 INIT_LIST_HEAD(&dst->ports);
247 INIT_LIST_HEAD(&dst->list);
248 list_add_tail(&dst->list, &dsa_tree_list);
250 kref_init(&dst->refcount);
255 static void dsa_tree_free(struct dsa_switch_tree *dst)
258 dsa_tag_driver_put(dst->tag_ops);
259 list_del(&dst->list);
263 static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
266 kref_get(&dst->refcount);
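/* Look up the tree with this index and take a reference to it, or allocate a
 * new one if it does not exist yet.
 */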
271 static struct dsa_switch_tree *dsa_tree_touch(int index)
273 struct dsa_switch_tree *dst;
275 dst = dsa_tree_find(index);
277 return dsa_tree_get(dst);
279 return dsa_tree_alloc(index);
282 static void dsa_tree_release(struct kref *ref)
284 struct dsa_switch_tree *dst;
286 dst = container_of(ref, struct dsa_switch_tree, refcount);
291 static void dsa_tree_put(struct dsa_switch_tree *dst)
294 kref_put(&dst->refcount, dsa_tree_release);
297 static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
298 struct device_node *dn)
302 list_for_each_entry(dp, &dst->ports, list)
309 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
310 struct dsa_port *link_dp)
312 struct dsa_switch *ds = dp->ds;
313 struct dsa_switch_tree *dst;
318 list_for_each_entry(dl, &dst->rtable, list)
319 if (dl->dp == dp && dl->link_dp == link_dp)
322 dl = kzalloc(sizeof(*dl), GFP_KERNEL);
327 dl->link_dp = link_dp;
329 INIT_LIST_HEAD(&dl->list);
330 list_add_tail(&dl->list, &dst->rtable);
335 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
337 struct dsa_switch *ds = dp->ds;
338 struct dsa_switch_tree *dst = ds->dst;
339 struct device_node *dn = dp->dn;
340 struct of_phandle_iterator it;
341 struct dsa_port *link_dp;
345 of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
346 link_dp = dsa_tree_find_port_by_node(dst, it.node);
348 of_node_put(it.node);
352 dl = dsa_link_touch(dp, link_dp);
354 of_node_put(it.node);
362 static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
364 bool complete = true;
367 list_for_each_entry(dp, &dst->ports, list) {
368 if (dsa_port_is_dsa(dp)) {
369 complete = dsa_port_setup_routing_table(dp);
378 static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
382 list_for_each_entry(dp, &dst->ports, list)
383 if (dsa_port_is_cpu(dp))
389 /* Assign the default CPU port (the first one in the tree) to all ports of the
390 * fabric which don't already have one as part of their own switch.
392 static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
394 struct dsa_port *cpu_dp, *dp;
396 cpu_dp = dsa_tree_find_first_cpu(dst);
398 pr_err("DSA: tree %d has no CPU port\n", dst->index);
402 list_for_each_entry(dp, &dst->ports, list) {
406 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
413 /* Perform initial assignment of CPU ports to user ports and DSA links in the
414 * fabric, giving preference to CPU ports local to each switch. Default to
415 * using the first CPU port in the switch tree if the port does not have a CPU
416 * port local to this switch.
418 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
420 struct dsa_port *cpu_dp, *dp;
422 list_for_each_entry(cpu_dp, &dst->ports, list) {
423 if (!dsa_port_is_cpu(cpu_dp))
426 /* Prefer a local CPU port */
427 dsa_switch_for_each_port(dp, cpu_dp->ds) {
428 /* Prefer the first local CPU port found */
432 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
437 return dsa_tree_setup_default_cpu(dst);
440 static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
444 list_for_each_entry(dp, &dst->ports, list)
445 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
449 static int dsa_port_setup(struct dsa_port *dp)
451 struct devlink_port *dlp = &dp->devlink_port;
452 bool dsa_port_link_registered = false;
453 struct dsa_switch *ds = dp->ds;
454 bool dsa_port_enabled = false;
460 if (ds->ops->port_setup) {
461 err = ds->ops->port_setup(ds, dp->index);
467 case DSA_PORT_TYPE_UNUSED:
468 dsa_port_disable(dp);
470 case DSA_PORT_TYPE_CPU:
471 err = dsa_port_link_register_of(dp);
474 dsa_port_link_registered = true;
476 err = dsa_port_enable(dp, NULL);
479 dsa_port_enabled = true;
482 case DSA_PORT_TYPE_DSA:
483 err = dsa_port_link_register_of(dp);
486 dsa_port_link_registered = true;
488 err = dsa_port_enable(dp, NULL);
491 dsa_port_enabled = true;
494 case DSA_PORT_TYPE_USER:
495 of_get_mac_address(dp->dn, dp->mac);
496 err = dsa_slave_create(dp);
500 devlink_port_type_eth_set(dlp, dp->slave);
504 if (err && dsa_port_enabled)
505 dsa_port_disable(dp);
506 if (err && dsa_port_link_registered)
507 dsa_port_link_unregister_of(dp);
509 if (ds->ops->port_teardown)
510 ds->ops->port_teardown(ds, dp->index);
519 static int dsa_port_devlink_setup(struct dsa_port *dp)
521 struct devlink_port *dlp = &dp->devlink_port;
522 struct dsa_switch_tree *dst = dp->ds->dst;
523 struct devlink_port_attrs attrs = {};
524 struct devlink *dl = dp->ds->devlink;
525 const unsigned char *id;
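/* The tree index doubles as the devlink switch ID, so all ports in the
 * fabric expose the same parent switch ID to user space.
 */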
529 id = (const unsigned char *)&dst->index;
530 len = sizeof(dst->index);
532 attrs.phys.port_number = dp->index;
533 memcpy(attrs.switch_id.id, id, len);
534 attrs.switch_id.id_len = len;
535 memset(dlp, 0, sizeof(*dlp));
538 case DSA_PORT_TYPE_UNUSED:
539 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
541 case DSA_PORT_TYPE_CPU:
542 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
544 case DSA_PORT_TYPE_DSA:
545 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
547 case DSA_PORT_TYPE_USER:
548 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
552 devlink_port_attrs_set(dlp, &attrs);
553 err = devlink_port_register(dl, dlp, dp->index);
556 dp->devlink_port_setup = true;
561 static void dsa_port_teardown(struct dsa_port *dp)
563 struct devlink_port *dlp = &dp->devlink_port;
564 struct dsa_switch *ds = dp->ds;
569 if (ds->ops->port_teardown)
570 ds->ops->port_teardown(ds, dp->index);
572 devlink_port_type_clear(dlp);
575 case DSA_PORT_TYPE_UNUSED:
577 case DSA_PORT_TYPE_CPU:
578 dsa_port_disable(dp);
579 dsa_port_link_unregister_of(dp);
581 case DSA_PORT_TYPE_DSA:
582 dsa_port_disable(dp);
583 dsa_port_link_unregister_of(dp);
585 case DSA_PORT_TYPE_USER:
587 dsa_slave_destroy(dp->slave);
596 static void dsa_port_devlink_teardown(struct dsa_port *dp)
598 struct devlink_port *dlp = &dp->devlink_port;
600 if (dp->devlink_port_setup)
601 devlink_port_unregister(dlp);
602 dp->devlink_port_setup = false;
605 /* Destroy the current devlink port, and create a new one which has the UNUSED
606 * flavour. At this point, any call to ds->ops->port_setup has been already
607 * balanced out by a call to ds->ops->port_teardown, so we know that any
608 * devlink port regions the driver had are now unregistered. We then call its
609 * ds->ops->port_setup again, in order for the driver to re-create them on the new devlink port.
612 static int dsa_port_reinit_as_unused(struct dsa_port *dp)
614 struct dsa_switch *ds = dp->ds;
617 dsa_port_devlink_teardown(dp);
618 dp->type = DSA_PORT_TYPE_UNUSED;
619 err = dsa_port_devlink_setup(dp);
623 if (ds->ops->port_setup) {
624 /* On error, leave the devlink port registered,
625 * dsa_switch_teardown will clean it up later.
627 err = ds->ops->port_setup(ds, dp->index);
635 static int dsa_devlink_info_get(struct devlink *dl,
636 struct devlink_info_req *req,
637 struct netlink_ext_ack *extack)
639 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
641 if (ds->ops->devlink_info_get)
642 return ds->ops->devlink_info_get(ds, req, extack);
647 static int dsa_devlink_sb_pool_get(struct devlink *dl,
648 unsigned int sb_index, u16 pool_index,
649 struct devlink_sb_pool_info *pool_info)
651 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
653 if (!ds->ops->devlink_sb_pool_get)
656 return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
660 static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
661 u16 pool_index, u32 size,
662 enum devlink_sb_threshold_type threshold_type,
663 struct netlink_ext_ack *extack)
665 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
667 if (!ds->ops->devlink_sb_pool_set)
670 return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
671 threshold_type, extack);
674 static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
675 unsigned int sb_index, u16 pool_index,
678 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
679 int port = dsa_devlink_port_to_port(dlp);
681 if (!ds->ops->devlink_sb_port_pool_get)
684 return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
685 pool_index, p_threshold);
688 static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
689 unsigned int sb_index, u16 pool_index,
691 struct netlink_ext_ack *extack)
693 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
694 int port = dsa_devlink_port_to_port(dlp);
696 if (!ds->ops->devlink_sb_port_pool_set)
699 return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
700 pool_index, threshold, extack);
704 dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
705 unsigned int sb_index, u16 tc_index,
706 enum devlink_sb_pool_type pool_type,
707 u16 *p_pool_index, u32 *p_threshold)
709 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
710 int port = dsa_devlink_port_to_port(dlp);
712 if (!ds->ops->devlink_sb_tc_pool_bind_get)
715 return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
717 p_pool_index, p_threshold);
721 dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
722 unsigned int sb_index, u16 tc_index,
723 enum devlink_sb_pool_type pool_type,
724 u16 pool_index, u32 threshold,
725 struct netlink_ext_ack *extack)
727 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
728 int port = dsa_devlink_port_to_port(dlp);
730 if (!ds->ops->devlink_sb_tc_pool_bind_set)
733 return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
735 pool_index, threshold,
739 static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
740 unsigned int sb_index)
742 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
744 if (!ds->ops->devlink_sb_occ_snapshot)
747 return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
750 static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
751 unsigned int sb_index)
753 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
755 if (!ds->ops->devlink_sb_occ_max_clear)
758 return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
761 static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
762 unsigned int sb_index,
763 u16 pool_index, u32 *p_cur,
766 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
767 int port = dsa_devlink_port_to_port(dlp);
769 if (!ds->ops->devlink_sb_occ_port_pool_get)
772 return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
773 pool_index, p_cur, p_max);
777 dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
778 unsigned int sb_index, u16 tc_index,
779 enum devlink_sb_pool_type pool_type,
780 u32 *p_cur, u32 *p_max)
782 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
783 int port = dsa_devlink_port_to_port(dlp);
785 if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
788 return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
794 static const struct devlink_ops dsa_devlink_ops = {
795 .info_get = dsa_devlink_info_get,
796 .sb_pool_get = dsa_devlink_sb_pool_get,
797 .sb_pool_set = dsa_devlink_sb_pool_set,
798 .sb_port_pool_get = dsa_devlink_sb_port_pool_get,
799 .sb_port_pool_set = dsa_devlink_sb_port_pool_set,
800 .sb_tc_pool_bind_get = dsa_devlink_sb_tc_pool_bind_get,
801 .sb_tc_pool_bind_set = dsa_devlink_sb_tc_pool_bind_set,
802 .sb_occ_snapshot = dsa_devlink_sb_occ_snapshot,
803 .sb_occ_max_clear = dsa_devlink_sb_occ_max_clear,
804 .sb_occ_port_pool_get = dsa_devlink_sb_occ_port_pool_get,
805 .sb_occ_tc_port_bind_get = dsa_devlink_sb_occ_tc_port_bind_get,
808 static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
810 const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
811 struct dsa_switch_tree *dst = ds->dst;
812 struct dsa_port *cpu_dp;
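/* Only ask the driver to change tagging protocols when the tree's tagger
 * differs from this switch's default; otherwise just connect to it.
 */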
815 if (tag_ops->proto == dst->default_proto)
818 dsa_switch_for_each_cpu_port(cpu_dp, ds) {
820 err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
824 dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
825 tag_ops->name, ERR_PTR(err));
831 if (tag_ops->connect) {
832 err = tag_ops->connect(ds);
837 if (ds->ops->connect_tag_protocol) {
838 err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
841 "Unable to connect to tag protocol \"%s\": %pe\n",
842 tag_ops->name, ERR_PTR(err));
850 if (tag_ops->disconnect)
851 tag_ops->disconnect(ds);
856 static int dsa_switch_setup(struct dsa_switch *ds)
858 struct dsa_devlink_priv *dl_priv;
865 /* Initialize ds->phys_mii_mask before registering the slave MDIO bus
866 * driver and before ops->setup() has run, since the switch drivers and
867 * the slave MDIO bus driver rely on these values for probing PHY
870 ds->phys_mii_mask |= dsa_user_ports(ds);
872 /* Add the switch to devlink before calling setup, so that setup can
876 devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
879 dl_priv = devlink_priv(ds->devlink);
882 /* Setup devlink port instances now, so that the switch
883 * setup() can register regions etc, against the ports
885 dsa_switch_for_each_port(dp, ds) {
886 err = dsa_port_devlink_setup(dp);
888 goto unregister_devlink_ports;
891 err = dsa_switch_register_notifier(ds);
893 goto unregister_devlink_ports;
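/* Keep bridge VLANs committed to hardware even while the bridge is not
 * VLAN filtering; this is the behaviour expected of new drivers.
 */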
895 ds->configure_vlan_while_not_filtering = true;
897 err = ds->ops->setup(ds);
899 goto unregister_notifier;
901 err = dsa_switch_setup_tag_protocol(ds);
905 if (!ds->slave_mii_bus && ds->ops->phy_read) {
906 ds->slave_mii_bus = mdiobus_alloc();
907 if (!ds->slave_mii_bus) {
912 dsa_slave_mii_bus_init(ds);
914 err = mdiobus_register(ds->slave_mii_bus);
916 goto free_slave_mii_bus;
920 devlink_register(ds->devlink);
924 if (ds->slave_mii_bus && ds->ops->phy_read)
925 mdiobus_free(ds->slave_mii_bus);
927 if (ds->ops->teardown)
928 ds->ops->teardown(ds);
930 dsa_switch_unregister_notifier(ds);
931 unregister_devlink_ports:
932 dsa_switch_for_each_port(dp, ds)
933 dsa_port_devlink_teardown(dp);
934 devlink_free(ds->devlink);
939 static void dsa_switch_teardown(struct dsa_switch *ds)
947 devlink_unregister(ds->devlink);
949 if (ds->slave_mii_bus && ds->ops->phy_read) {
950 mdiobus_unregister(ds->slave_mii_bus);
951 mdiobus_free(ds->slave_mii_bus);
952 ds->slave_mii_bus = NULL;
955 if (ds->ops->teardown)
956 ds->ops->teardown(ds);
958 dsa_switch_unregister_notifier(ds);
961 dsa_switch_for_each_port(dp, ds)
962 dsa_port_devlink_teardown(dp);
963 devlink_free(ds->devlink);
970 /* First tear down the non-shared, then the shared ports. This ensures that
971 * all work items scheduled by our switchdev handlers for user ports have
972 * completed before we destroy the refcounting kept on the shared ports.
974 static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
978 list_for_each_entry(dp, &dst->ports, list)
979 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
980 dsa_port_teardown(dp);
982 dsa_flush_workqueue();
984 list_for_each_entry(dp, &dst->ports, list)
985 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
986 dsa_port_teardown(dp);
989 static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
993 list_for_each_entry(dp, &dst->ports, list)
994 dsa_switch_teardown(dp->ds);
997 /* Bring shared ports up first, then non-shared ports */
998 static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
1000 struct dsa_port *dp;
1003 list_for_each_entry(dp, &dst->ports, list) {
1004 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
1005 err = dsa_port_setup(dp);
1011 list_for_each_entry(dp, &dst->ports, list) {
1012 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
1013 err = dsa_port_setup(dp);
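/* A user port that fails to set up is downgraded to an unused port
 * rather than failing the whole tree.
 */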
1015 err = dsa_port_reinit_as_unused(dp);
1025 dsa_tree_teardown_ports(dst);
1030 static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1032 struct dsa_port *dp;
1035 list_for_each_entry(dp, &dst->ports, list) {
1036 err = dsa_switch_setup(dp->ds);
1038 dsa_tree_teardown_switches(dst);
1046 static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
1048 struct dsa_port *dp;
1053 list_for_each_entry(dp, &dst->ports, list) {
1054 if (dsa_port_is_cpu(dp)) {
1055 struct net_device *master = dp->master;
1056 bool admin_up = (master->flags & IFF_UP) &&
1057 !qdisc_tx_is_noop(master);
1059 err = dsa_master_setup(master, dp);
1063 /* Replay master state event */
1064 dsa_tree_master_admin_state_change(dst, master, admin_up);
1065 dsa_tree_master_oper_state_change(dst, master,
1066 netif_oper_up(master));
1075 static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1077 struct dsa_port *dp;
1081 list_for_each_entry(dp, &dst->ports, list) {
1082 if (dsa_port_is_cpu(dp)) {
1083 struct net_device *master = dp->master;
1085 /* Synthesizing an "admin down" state is sufficient for
1086 * the switches to get a notification if the master is
1087 * currently up and running.
1089 dsa_tree_master_admin_state_change(dst, master, false);
1091 dsa_master_teardown(master);
1098 static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1100 unsigned int len = 0;
1101 struct dsa_port *dp;
1103 list_for_each_entry(dp, &dst->ports, list) {
1104 if (dp->ds->num_lag_ids > len)
1105 len = dp->ds->num_lag_ids;
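/* The LAG ID space is shared across the whole tree, so size the array to
 * the largest number of LAG IDs that any member switch supports.
 */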
1111 dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1115 dst->lags_len = len;
1119 static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
1124 static int dsa_tree_setup(struct dsa_switch_tree *dst)
1130 pr_err("DSA: tree %d already setup! Disjoint trees?\n",
1135 complete = dsa_tree_setup_routing_table(dst);
1139 err = dsa_tree_setup_cpu_ports(dst);
1143 err = dsa_tree_setup_switches(dst);
1145 goto teardown_cpu_ports;
1147 err = dsa_tree_setup_ports(dst);
1149 goto teardown_switches;
1151 err = dsa_tree_setup_master(dst);
1153 goto teardown_ports;
1155 err = dsa_tree_setup_lags(dst);
1157 goto teardown_master;
1161 pr_info("DSA: tree %d setup\n", dst->index);
1166 dsa_tree_teardown_master(dst);
1168 dsa_tree_teardown_ports(dst);
1170 dsa_tree_teardown_switches(dst);
1172 dsa_tree_teardown_cpu_ports(dst);
1177 static void dsa_tree_teardown(struct dsa_switch_tree *dst)
1179 struct dsa_link *dl, *next;
1184 dsa_tree_teardown_lags(dst);
1186 dsa_tree_teardown_master(dst);
1188 dsa_tree_teardown_ports(dst);
1190 dsa_tree_teardown_switches(dst);
1192 dsa_tree_teardown_cpu_ports(dst);
1194 list_for_each_entry_safe(dl, next, &dst->rtable, list) {
1195 list_del(&dl->list);
1199 pr_info("DSA: tree %d torn down\n", dst->index);
1204 static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
1205 const struct dsa_device_ops *tag_ops)
1207 const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
1208 struct dsa_notifier_tag_proto_info info;
1211 dst->tag_ops = tag_ops;
1213 /* Notify the switches from this tree about the connection to the new tagger */
1216 info.tag_ops = tag_ops;
1217 err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
1218 if (err && err != -EOPNOTSUPP)
1219 goto out_disconnect;
1221 /* Notify the old tagger about the disconnection from this tree */
1222 info.tag_ops = old_tag_ops;
1223 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
1228 info.tag_ops = tag_ops;
1229 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
1230 dst->tag_ops = old_tag_ops;
1235 /* Since the dsa/tagging sysfs device attribute is per master, the assumption
1236 * is that all DSA switches within a tree share the same tagger, otherwise
1237 * they would have formed disjoint trees (different "dsa,member" values).
1239 int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
1240 struct net_device *master,
1241 const struct dsa_device_ops *tag_ops,
1242 const struct dsa_device_ops *old_tag_ops)
1244 struct dsa_notifier_tag_proto_info info;
1245 struct dsa_port *dp;
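/* rtnl_trylock() with restart_syscall() is the usual way to avoid
 * deadlocking against paths that hold the rtnl_mutex while waiting for
 * this sysfs attribute to be removed.
 */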
1248 if (!rtnl_trylock())
1249 return restart_syscall();
1251 /* At the moment we don't allow changing the tag protocol under
1252 * traffic. The rtnl_mutex also happens to serialize concurrent
1253 * attempts to change the tagging protocol. If we ever lift the IFF_UP
1254 * restriction, there needs to be another mutex which serializes this.
1256 if (master->flags & IFF_UP)
1259 list_for_each_entry(dp, &dst->ports, list) {
1260 if (!dsa_port_is_user(dp))
1263 if (dp->slave->flags & IFF_UP)
1267 /* Notify the tag protocol change */
1268 info.tag_ops = tag_ops;
1269 err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1271 goto out_unwind_tagger;
1273 err = dsa_tree_bind_tag_proto(dst, tag_ops);
1275 goto out_unwind_tagger;
1282 info.tag_ops = old_tag_ops;
1283 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1289 static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
1290 struct net_device *master)
1292 struct dsa_notifier_master_state_info info;
1293 struct dsa_port *cpu_dp = master->dsa_ptr;
1295 info.master = master;
1296 info.operational = dsa_port_master_is_operational(cpu_dp);
1298 dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
1301 void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
1302 struct net_device *master,
1305 struct dsa_port *cpu_dp = master->dsa_ptr;
1306 bool notify = false;
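/* Only notify the fabric when the combined (admin up && oper up) state
 * of the master actually changes.
 */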
1308 if ((dsa_port_master_is_operational(cpu_dp)) !=
1309 (up && cpu_dp->master_oper_up))
1312 cpu_dp->master_admin_up = up;
1315 dsa_tree_master_state_change(dst, master);
1318 void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
1319 struct net_device *master,
1322 struct dsa_port *cpu_dp = master->dsa_ptr;
1323 bool notify = false;
1325 if ((dsa_port_master_is_operational(cpu_dp)) !=
1326 (cpu_dp->master_admin_up && up))
1329 cpu_dp->master_oper_up = up;
1332 dsa_tree_master_state_change(dst, master);
1335 static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1337 struct dsa_switch_tree *dst = ds->dst;
1338 struct dsa_port *dp;
1340 dsa_switch_for_each_port(dp, ds)
1341 if (dp->index == index)
1344 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1351 mutex_init(&dp->addr_lists_lock);
1352 mutex_init(&dp->vlans_lock);
1353 INIT_LIST_HEAD(&dp->fdbs);
1354 INIT_LIST_HEAD(&dp->mdbs);
1355 INIT_LIST_HEAD(&dp->vlans);
1356 INIT_LIST_HEAD(&dp->list);
1357 list_add_tail(&dp->list, &dst->ports);
1362 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1367 dp->type = DSA_PORT_TYPE_USER;
1373 static int dsa_port_parse_dsa(struct dsa_port *dp)
1375 dp->type = DSA_PORT_TYPE_DSA;
1380 static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
1381 struct net_device *master)
1383 enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
1384 struct dsa_switch *mds, *ds = dp->ds;
1385 unsigned int mdp_upstream;
1386 struct dsa_port *mdp;
1388 /* It is possible to stack DSA switches onto one another; when that
1389 * happens, the switch driver may want to know if its tagging protocol
1390 * is going to work in such a configuration.
1392 if (dsa_slave_dev_check(master)) {
1393 mdp = dsa_slave_to_port(master);
1395 mdp_upstream = dsa_upstream_port(mds, mdp->index);
1396 tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
1397 DSA_TAG_PROTO_NONE);
1400 /* If the master device is not itself a DSA slave in a disjoint DSA
1401 * tree, then return immediately.
1403 return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1406 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
1407 const char *user_protocol)
1409 struct dsa_switch *ds = dp->ds;
1410 struct dsa_switch_tree *dst = ds->dst;
1411 const struct dsa_device_ops *tag_ops;
1412 enum dsa_tag_protocol default_proto;
1414 /* Find out which protocol the switch would prefer. */
1415 default_proto = dsa_get_tag_protocol(dp, master);
1416 if (dst->default_proto) {
1417 if (dst->default_proto != default_proto) {
1419 "A DSA switch tree can have only one tagging protocol\n");
1423 dst->default_proto = default_proto;
1426 /* See if the user wants to override that preference. */
1427 if (user_protocol) {
1428 if (!ds->ops->change_tag_protocol) {
1429 dev_err(ds->dev, "Tag protocol cannot be modified\n");
1433 tag_ops = dsa_find_tagger_by_name(user_protocol);
1435 tag_ops = dsa_tag_driver_get(default_proto);
1438 if (IS_ERR(tag_ops)) {
1439 if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
1440 return -EPROBE_DEFER;
1442 dev_warn(ds->dev, "No tagger for this switch\n");
1443 return PTR_ERR(tag_ops);
1447 if (dst->tag_ops != tag_ops) {
1449 "A DSA switch tree can have only one tagging protocol\n");
1451 dsa_tag_driver_put(tag_ops);
1455 /* In the case of multiple CPU ports per switch, the tagging
1456 * protocol is still reference-counted only per switch tree.
1458 dsa_tag_driver_put(tag_ops);
1460 dst->tag_ops = tag_ops;
1463 dp->master = master;
1464 dp->type = DSA_PORT_TYPE_CPU;
1465 dsa_port_set_tag_protocol(dp, dst->tag_ops);
1468 /* At this point, the tree may be configured to use a different
1469 * tagger than the one chosen by the switch driver during
1470 * .setup, in the case when a user selects a custom protocol
1473 * This is resolved by syncing the driver with the tree in
1474 * dsa_switch_setup_tag_protocol once .setup has run and the
1475 * driver is ready to accept calls to .change_tag_protocol. If
1476 * the driver does not support the custom protocol at that
1477 * point, the tree is wholly rejected, thereby ensuring that the
1478 * tree and driver are always in agreement on the protocol to use.
1484 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1486 struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1487 const char *name = of_get_property(dn, "label", NULL);
1488 bool link = of_property_read_bool(dn, "link");
1493 struct net_device *master;
1494 const char *user_protocol;
1496 master = of_find_net_device_by_node(ethernet);
1497 of_node_put(ethernet);
1499 return -EPROBE_DEFER;
1501 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1502 return dsa_port_parse_cpu(dp, master, user_protocol);
1506 return dsa_port_parse_dsa(dp);
1508 return dsa_port_parse_user(dp, name);
1511 static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1512 struct device_node *dn)
1514 struct device_node *ports, *port;
1515 struct dsa_port *dp;
1519 ports = of_get_child_by_name(dn, "ports");
1521 /* The second possibility is "ethernet-ports" */
1522 ports = of_get_child_by_name(dn, "ethernet-ports");
1524 dev_err(ds->dev, "no ports child node found\n");
1529 for_each_available_child_of_node(ports, port) {
1530 err = of_property_read_u32(port, "reg", &reg);
1536 if (reg >= ds->num_ports) {
1537 dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
1538 port, reg, ds->num_ports);
1544 dp = dsa_to_port(ds, reg);
1546 err = dsa_port_parse_of(dp, port);
1558 static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1559 struct device_node *dn)
1561 u32 m[2] = { 0, 0 };
1564 /* Don't error out if this optional property isn't found */
1565 sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1566 if (sz < 0 && sz != -EINVAL)
1571 ds->dst = dsa_tree_touch(m[0]);
1575 if (dsa_switch_find(ds->dst->index, ds->index)) {
1577 "A DSA switch with index %d already exists in tree %d\n",
1578 ds->index, ds->dst->index);
1582 if (ds->dst->last_switch < ds->index)
1583 ds->dst->last_switch = ds->index;
1588 static int dsa_switch_touch_ports(struct dsa_switch *ds)
1590 struct dsa_port *dp;
1593 for (port = 0; port < ds->num_ports; port++) {
1594 dp = dsa_port_touch(ds, port);
1602 static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
1606 err = dsa_switch_parse_member_of(ds, dn);
1610 err = dsa_switch_touch_ports(ds);
1614 return dsa_switch_parse_ports_of(ds, dn);
1617 static int dsa_port_parse(struct dsa_port *dp, const char *name,
1620 if (!strcmp(name, "cpu")) {
1621 struct net_device *master;
1623 master = dsa_dev_to_net_device(dev);
1625 return -EPROBE_DEFER;
1629 return dsa_port_parse_cpu(dp, master, NULL);
1632 if (!strcmp(name, "dsa"))
1633 return dsa_port_parse_dsa(dp);
1635 return dsa_port_parse_user(dp, name);
1638 static int dsa_switch_parse_ports(struct dsa_switch *ds,
1639 struct dsa_chip_data *cd)
1641 bool valid_name_found = false;
1642 struct dsa_port *dp;
1648 for (i = 0; i < DSA_MAX_PORTS; i++) {
1649 name = cd->port_names[i];
1650 dev = cd->netdev[i];
1651 dp = dsa_to_port(ds, i);
1656 err = dsa_port_parse(dp, name, dev);
1660 valid_name_found = true;
1663 if (!valid_name_found && i == DSA_MAX_PORTS)
1669 static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1675 /* We don't support interconnected switches nor multiple trees via
1676 * platform data, so this is the unique switch of the tree.
1679 ds->dst = dsa_tree_touch(0);
1683 err = dsa_switch_touch_ports(ds);
1687 return dsa_switch_parse_ports(ds, cd);
1690 static void dsa_switch_release_ports(struct dsa_switch *ds)
1692 struct dsa_port *dp, *next;
1694 dsa_switch_for_each_port_safe(dp, next, ds) {
1695 WARN_ON(!list_empty(&dp->fdbs));
1696 WARN_ON(!list_empty(&dp->mdbs));
1697 WARN_ON(!list_empty(&dp->vlans));
1698 list_del(&dp->list);
1703 static int dsa_switch_probe(struct dsa_switch *ds)
1705 struct dsa_switch_tree *dst;
1706 struct dsa_chip_data *pdata;
1707 struct device_node *np;
1713 pdata = ds->dev->platform_data;
1714 np = ds->dev->of_node;
1720 err = dsa_switch_parse_of(ds, np);
1722 dsa_switch_release_ports(ds);
1724 err = dsa_switch_parse(ds, pdata);
1726 dsa_switch_release_ports(ds);
1736 err = dsa_tree_setup(dst);
1738 dsa_switch_release_ports(ds);
1745 int dsa_register_switch(struct dsa_switch *ds)
1749 mutex_lock(&dsa2_mutex);
1750 err = dsa_switch_probe(ds);
1751 dsa_tree_put(ds->dst);
1752 mutex_unlock(&dsa2_mutex);
1756 EXPORT_SYMBOL_GPL(dsa_register_switch);
1758 static void dsa_switch_remove(struct dsa_switch *ds)
1760 struct dsa_switch_tree *dst = ds->dst;
1762 dsa_tree_teardown(dst);
1763 dsa_switch_release_ports(ds);
1767 void dsa_unregister_switch(struct dsa_switch *ds)
1769 mutex_lock(&dsa2_mutex);
1770 dsa_switch_remove(ds);
1771 mutex_unlock(&dsa2_mutex);
1773 EXPORT_SYMBOL_GPL(dsa_unregister_switch);
1775 /* If the DSA master chooses to unregister its net_device on .shutdown, DSA
1776 * blocks that operation from completing, due to the dev_hold taken inside
1777 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
1778 * the DSA master, so that the system can reboot successfully.
1780 void dsa_switch_shutdown(struct dsa_switch *ds)
1782 struct net_device *master, *slave_dev;
1783 struct dsa_port *dp;
1785 mutex_lock(&dsa2_mutex);
1792 dsa_switch_for_each_user_port(dp, ds) {
1793 master = dp->cpu_dp->master;
1794 slave_dev = dp->slave;
1796 netdev_upper_dev_unlink(master, slave_dev);
1799 /* Disconnect from further netdevice notifiers on the master,
1800 * since netdev_uses_dsa() will now return false.
1802 dsa_switch_for_each_cpu_port(dp, ds)
1803 dp->master->dsa_ptr = NULL;
1807 mutex_unlock(&dsa2_mutex);
1809 EXPORT_SYMBOL_GPL(dsa_switch_shutdown);